Merge branch 'main' of github.com:CherryHQ/cherry-studio into chore/ci-auto-i18n

This commit is contained in:
icarus 2025-09-04 17:34:29 +08:00
commit 6a4de37728
125 changed files with 6212 additions and 1024 deletions

View File

@ -74,15 +74,23 @@
"@libsql/win32-x64-msvc": "^0.4.7",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"@strongtz/win32-arm64-msvc": "^0.4.7",
"cheerio": "^1.1.2",
"faiss-node": "^0.5.1",
"graceful-fs": "^4.2.11",
"html-to-text": "^9.0.5",
"htmlparser2": "^10.0.0",
"jsdom": "26.1.0",
"node-stream-zip": "^1.15.0",
"officeparser": "^4.2.0",
"os-proxy-config": "^1.1.2",
"pdf-parse": "^1.1.1",
"react-player": "^3.3.1",
"react-youtube": "^10.1.0",
"selection-hook": "^1.0.11",
"sharp": "^0.34.3",
"tesseract.js": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
"turndown": "7.2.0"
"turndown": "7.2.0",
"youtubei.js": "^15.0.1"
},
"devDependencies": {
"@agentic/exa": "^7.3.3",
@ -127,8 +135,10 @@
"@google/genai": "patch:@google/genai@npm%3A1.0.1#~/.yarn/patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
"@hello-pangea/dnd": "^18.0.1",
"@kangfenmao/keyv-storage": "^0.1.0",
"@langchain/community": "^0.3.36",
"@langchain/community": "^0.3.50",
"@langchain/core": "^0.3.68",
"@langchain/ollama": "^0.2.1",
"@langchain/openai": "^0.6.7",
"@mistralai/mistralai": "^1.7.5",
"@modelcontextprotocol/sdk": "^1.17.0",
"@mozilla/readability": "^0.6.0",
@ -171,9 +181,11 @@
"@types/cli-progress": "^3",
"@types/fs-extra": "^11",
"@types/he": "^1",
"@types/html-to-text": "^9",
"@types/lodash": "^4.17.5",
"@types/markdown-it": "^14",
"@types/md5": "^2.3.5",
"@types/mime-types": "^3",
"@types/node": "^22.17.1",
"@types/pako": "^1.0.2",
"@types/react": "^19.0.12",
@ -253,6 +265,7 @@
"markdown-it": "^14.1.0",
"mermaid": "^11.10.1",
"mime": "^4.0.4",
"mime-types": "^3.0.1",
"motion": "^12.10.5",
"notion-helper": "^1.3.22",
"npx-scope-finder": "^1.2.0",

View File

@ -7,7 +7,7 @@ export type LoaderReturn = {
loaderType: string
status?: ProcessingStatus
message?: string
messageSource?: 'preprocess' | 'embedding'
messageSource?: 'preprocess' | 'embedding' | 'validation'
}
export type FileChangeEventType = 'add' | 'change' | 'unlink' | 'addDir' | 'unlinkDir'

View File

@ -24,7 +24,7 @@ import DxtService from './services/DxtService'
import { ExportService } from './services/ExportService'
import { fileStorage as fileManager } from './services/FileStorage'
import FileService from './services/FileSystemService'
import KnowledgeService from './services/KnowledgeService'
import KnowledgeService from './services/knowledge/KnowledgeService'
import mcpService from './services/MCPService'
import MemoryService from './services/memory/MemoryService'
import { openTraceWindow, setTraceWindowTitle } from './services/NodeTraceService'
@ -524,7 +524,6 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
}
})
// knowledge base
ipcMain.handle(IpcChannel.KnowledgeBase_Create, KnowledgeService.create.bind(KnowledgeService))
ipcMain.handle(IpcChannel.KnowledgeBase_Reset, KnowledgeService.reset.bind(KnowledgeService))
ipcMain.handle(IpcChannel.KnowledgeBase_Delete, KnowledgeService.delete.bind(KnowledgeService))

View File

@ -0,0 +1,63 @@
import { VoyageEmbeddings } from '@langchain/community/embeddings/voyage'
import type { Embeddings } from '@langchain/core/embeddings'
import { OllamaEmbeddings } from '@langchain/ollama'
import { AzureOpenAIEmbeddings, OpenAIEmbeddings } from '@langchain/openai'
import { ApiClient, SystemProviderIds } from '@types'
import { isJinaEmbeddingsModel, JinaEmbeddings } from './JinaEmbeddings'
export default class EmbeddingsFactory {
static create({ embedApiClient, dimensions }: { embedApiClient: ApiClient; dimensions?: number }): Embeddings {
const batchSize = 10
const { model, provider, apiKey, apiVersion, baseURL } = embedApiClient
if (provider === SystemProviderIds.ollama) {
let baseUrl = baseURL
if (baseURL.includes('v1/')) {
baseUrl = baseURL.replace('v1/', '')
}
const headers = apiKey
? {
Authorization: `Bearer ${apiKey}`
}
: undefined
return new OllamaEmbeddings({
model: model,
baseUrl,
...headers
})
} else if (provider === SystemProviderIds.voyageai) {
return new VoyageEmbeddings({
modelName: model,
apiKey,
outputDimension: dimensions,
batchSize
})
}
if (isJinaEmbeddingsModel(model)) {
return new JinaEmbeddings({
model,
apiKey,
batchSize,
dimensions,
baseUrl: baseURL
})
}
if (apiVersion !== undefined) {
return new AzureOpenAIEmbeddings({
azureOpenAIApiKey: apiKey,
azureOpenAIApiVersion: apiVersion,
azureOpenAIApiDeploymentName: model,
azureOpenAIEndpoint: baseURL,
dimensions,
batchSize
})
}
return new OpenAIEmbeddings({
model,
apiKey,
dimensions,
batchSize,
configuration: { baseURL }
})
}
}
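
A minimal usage sketch for the factory above; the provider id, model name, and key handling are assumptions, and the ApiClient shape is only partially filled in here:

```typescript
import EmbeddingsFactory from './EmbeddingsFactory'

// Any provider that is not ollama/voyageai/Jina/Azure falls through to OpenAIEmbeddings.
const embeddings = EmbeddingsFactory.create({
  embedApiClient: {
    provider: 'openai',                      // assumed provider id
    model: 'text-embedding-3-small',         // assumed model
    apiKey: process.env.OPENAI_API_KEY ?? '',
    baseURL: 'https://api.openai.com/v1'
  } as any,                                  // partial ApiClient, cast for the sketch
  dimensions: 1024
})

const vectors = await embeddings.embedDocuments(['hello world'])
console.log(vectors[0].length) // 1024
```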

View File

@ -0,0 +1,199 @@
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings'
import { chunkArray } from '@langchain/core/utils/chunk_array'
import { getEnvironmentVariable } from '@langchain/core/utils/env'
import z from 'zod/v4'
const jinaModelSchema = z.union([
z.literal('jina-clip-v2'),
z.literal('jina-embeddings-v3'),
z.literal('jina-colbert-v2'),
z.literal('jina-clip-v1'),
z.literal('jina-colbert-v1-en'),
z.literal('jina-embeddings-v2-base-es'),
z.literal('jina-embeddings-v2-base-code'),
z.literal('jina-embeddings-v2-base-de'),
z.literal('jina-embeddings-v2-base-zh'),
z.literal('jina-embeddings-v2-base-en')
])
type JinaModel = z.infer<typeof jinaModelSchema>
export const isJinaEmbeddingsModel = (model: string): model is JinaModel => {
return jinaModelSchema.safeParse(model).success
}
interface JinaEmbeddingsParams extends EmbeddingsParams {
/** Model name to use */
model: JinaModel
baseUrl?: string
/**
* Timeout to use when making requests to Jina.
*/
timeout?: number
/**
* The maximum number of documents to embed in a single request.
*/
batchSize?: number
/**
* Whether to strip new lines from the input text.
*/
stripNewLines?: boolean
/**
* The dimensions of the embedding.
*/
dimensions?: number
/**
* Scales the embedding so its Euclidean (L2) norm becomes 1, preserving direction. Useful when downstream tasks involve dot products, classification, or visualization.
*/
normalized?: boolean
}
type JinaMultiModelInput =
| {
text: string
image?: never
}
| {
image: string
text?: never
}
type JinaEmbeddingsInput = string | JinaMultiModelInput
interface EmbeddingCreateParams {
model: JinaEmbeddingsParams['model']
/**
* Input can be strings or JinaMultiModelInputs; to embed images, use JinaMultiModelInputs.
*/
input: JinaEmbeddingsInput[]
dimensions: number
task?: 'retrieval.query' | 'retrieval.passage'
}
interface EmbeddingResponse {
model: string
object: string
usage: {
total_tokens: number
prompt_tokens: number
}
data: {
object: string
index: number
embedding: number[]
}[]
}
interface EmbeddingErrorResponse {
detail: string
}
export class JinaEmbeddings extends Embeddings implements JinaEmbeddingsParams {
model: JinaEmbeddingsParams['model'] = 'jina-clip-v2'
batchSize = 24
baseUrl = 'https://api.jina.ai/v1/embeddings'
stripNewLines = true
dimensions = 1024
apiKey: string
constructor(
fields?: Partial<JinaEmbeddingsParams> & {
apiKey?: string
}
) {
const fieldsWithDefaults = { maxConcurrency: 2, ...fields }
super(fieldsWithDefaults)
const apiKey =
fieldsWithDefaults?.apiKey || getEnvironmentVariable('JINA_API_KEY') || getEnvironmentVariable('JINA_AUTH_TOKEN')
if (!apiKey) throw new Error('Jina API key not found')
this.apiKey = apiKey
this.baseUrl = fieldsWithDefaults?.baseUrl ? `${fieldsWithDefaults?.baseUrl}embeddings` : this.baseUrl
this.model = fieldsWithDefaults?.model ?? this.model
this.dimensions = fieldsWithDefaults?.dimensions ?? this.dimensions
this.batchSize = fieldsWithDefaults?.batchSize ?? this.batchSize
this.stripNewLines = fieldsWithDefaults?.stripNewLines ?? this.stripNewLines
}
private doStripNewLines(input: JinaEmbeddingsInput[]) {
if (this.stripNewLines) {
return input.map((i) => {
if (typeof i === 'string') {
return i.replace(/\n/g, ' ')
}
if (i.text) {
return { text: i.text.replace(/\n/g, ' ') }
}
return i
})
}
return input
}
async embedDocuments(input: JinaEmbeddingsInput[]): Promise<number[][]> {
const batches = chunkArray(this.doStripNewLines(input), this.batchSize)
const batchRequests = batches.map((batch) => {
const params = this.getParams(batch)
return this.embeddingWithRetry(params)
})
const batchResponses = await Promise.all(batchRequests)
const embeddings: number[][] = []
for (let i = 0; i < batchResponses.length; i += 1) {
const batch = batches[i]
const batchResponse = batchResponses[i] || []
for (let j = 0; j < batch.length; j += 1) {
embeddings.push(batchResponse[j])
}
}
return embeddings
}
async embedQuery(input: JinaEmbeddingsInput): Promise<number[]> {
const params = this.getParams(this.doStripNewLines([input]), true)
const embeddings = (await this.embeddingWithRetry(params)) || [[]]
return embeddings[0]
}
private getParams(input: JinaEmbeddingsInput[], query?: boolean): EmbeddingCreateParams {
return {
model: this.model,
input,
dimensions: this.dimensions,
task: query ? 'retrieval.query' : this.model === 'jina-clip-v2' ? undefined : 'retrieval.passage'
}
}
private async embeddingWithRetry(body: EmbeddingCreateParams) {
const response = await fetch(this.baseUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.apiKey}`
},
body: JSON.stringify(body)
})
const embeddingData: EmbeddingResponse | EmbeddingErrorResponse = await response.json()
if ('detail' in embeddingData && embeddingData.detail) {
throw new Error(`${embeddingData.detail}`)
}
return (embeddingData as EmbeddingResponse).data.map(({ embedding }) => embedding)
}
}
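
A hypothetical usage sketch of the class above; the image URL is a placeholder and JINA_API_KEY is assumed to be set in the environment:

```typescript
import { JinaEmbeddings } from './JinaEmbeddings'

const jina = new JinaEmbeddings({ model: 'jina-clip-v2', dimensions: 1024 })

// Queries are sent with the 'retrieval.query' task.
const queryVector = await jina.embedQuery('how do I reset my password?')

// jina-clip-v2 also accepts image inputs alongside text.
const docVectors = await jina.embedDocuments([
  { text: 'Reset your password from the account settings page.' },
  { image: 'https://example.com/screenshot.png' } // placeholder URL
])
console.log(queryVector.length, docVectors.length)
```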

View File

@ -0,0 +1,25 @@
import type { Embeddings as BaseEmbeddings } from '@langchain/core/embeddings'
import { TraceMethod } from '@mcp-trace/trace-core'
import { ApiClient } from '@types'
import EmbeddingsFactory from './EmbeddingsFactory'
export default class TextEmbeddings {
private sdk: BaseEmbeddings
constructor({ embedApiClient, dimensions }: { embedApiClient: ApiClient; dimensions?: number }) {
this.sdk = EmbeddingsFactory.create({
embedApiClient,
dimensions
})
}
@TraceMethod({ spanName: 'embedDocuments', tag: 'Embeddings' })
public async embedDocuments(texts: string[]): Promise<number[][]> {
return this.sdk.embedDocuments(texts)
}
@TraceMethod({ spanName: 'embedQuery', tag: 'Embeddings' })
public async embedQuery(text: string): Promise<number[]> {
return this.sdk.embedQuery(text)
}
}

View File

@ -0,0 +1,97 @@
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
import { Document } from '@langchain/core/documents'
import { readTextFileWithAutoEncoding } from '@main/utils/file'
import MarkdownIt from 'markdown-it'
export class MarkdownLoader extends BaseDocumentLoader {
private path: string
private md: MarkdownIt
constructor(path: string) {
super()
this.path = path
this.md = new MarkdownIt()
}
public async load(): Promise<Document[]> {
const content = await readTextFileWithAutoEncoding(this.path)
return this.parseMarkdown(content)
}
private parseMarkdown(content: string): Document[] {
const tokens = this.md.parse(content, {})
const documents: Document[] = []
let currentSection: {
heading?: string
level?: number
content: string
startLine?: number
} = { content: '' }
let i = 0
while (i < tokens.length) {
const token = tokens[i]
if (token.type === 'heading_open') {
// Save previous section if it has content
if (currentSection.content.trim()) {
documents.push(
new Document({
pageContent: currentSection.content.trim(),
metadata: {
source: this.path,
heading: currentSection.heading || 'Introduction',
level: currentSection.level || 0,
startLine: currentSection.startLine || 0
}
})
)
}
// Start new section
const level = parseInt(token.tag.slice(1)) // Extract number from h1, h2, etc.
const headingContent = tokens[i + 1]?.content || ''
currentSection = {
heading: headingContent,
level: level,
content: '',
startLine: token.map?.[0] || 0
}
// Skip heading_open, inline, heading_close tokens
i += 3
continue
}
// Add token content to current section
if (token.content) {
currentSection.content += token.content
}
// Add newlines for block tokens
if (token.block && token.type !== 'heading_close') {
currentSection.content += '\n'
}
i++
}
// Add the last section
if (currentSection.content.trim()) {
documents.push(
new Document({
pageContent: currentSection.content.trim(),
metadata: {
source: this.path,
heading: currentSection.heading || 'Introduction',
level: currentSection.level || 0,
startLine: currentSection.startLine || 0
}
})
)
}
return documents
}
}
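
A short usage sketch for the loader above, splitting a markdown file into per-heading Documents; the file path is a placeholder:

```typescript
import { MarkdownLoader } from './MarkdownLoader'

const loader = new MarkdownLoader('/path/to/notes.md') // placeholder path
const docs = await loader.load()

for (const doc of docs) {
  // Each Document carries its heading, heading level, and starting line.
  console.log(doc.metadata.heading, doc.metadata.level, doc.metadata.startLine)
}
```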

View File

@ -0,0 +1,50 @@
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
import { Document } from '@langchain/core/documents'
export class NoteLoader extends BaseDocumentLoader {
private text: string
private sourceUrl?: string
constructor(
public _text: string,
public _sourceUrl?: string
) {
super()
this.text = _text
this.sourceUrl = _sourceUrl
}
/**
* A protected method that takes a `raw` string as a parameter and returns
* a promise that resolves to an array containing the raw text as a single
* element.
* @param raw The raw text to be parsed.
* @returns A promise that resolves to an array containing the raw text as a single element.
*/
protected async parse(raw: string): Promise<string[]> {
return [raw]
}
public async load(): Promise<Document[]> {
const metadata = { source: this.sourceUrl || 'note' }
const parsed = await this.parse(this.text)
parsed.forEach((pageContent, i) => {
if (typeof pageContent !== 'string') {
throw new Error(`Expected string at position ${i}, got ${typeof pageContent}`)
}
})
return parsed.map(
(pageContent, i) =>
new Document({
pageContent,
metadata:
parsed.length === 1
? metadata
: {
...metadata,
line: i + 1
}
})
)
}
}

View File

@ -0,0 +1,170 @@
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
import { Document } from '@langchain/core/documents'
import { Innertube } from 'youtubei.js'
// ... (the YoutubeConfig and VideoMetadata interface definitions are unchanged)
/**
* Configuration options for the YoutubeLoader class. Includes properties
* such as the videoId, language, and addVideoInfo.
*/
interface YoutubeConfig {
videoId: string
language?: string
addVideoInfo?: boolean
// New option controlling the transcript output format
transcriptFormat?: 'text' | 'srt'
}
/**
* Metadata of a YouTube video. Includes properties such as the source
* (videoId), description, title, view_count, author, and category.
*/
interface VideoMetadata {
source: string
description?: string
title?: string
view_count?: number
author?: string
category?: string
}
/**
* A document loader for loading data from YouTube videos. It uses the
* youtubei.js library to fetch the transcript and video metadata.
* @example
* ```typescript
* const loader = new YoutubeLoader({
* videoId: "VIDEO_ID",
* language: "en",
* addVideoInfo: true,
* transcriptFormat: "srt" // fetch the transcript in SRT format
* });
* const docs = await loader.load();
* console.log(docs[0].pageContent);
* ```
*/
export class YoutubeLoader extends BaseDocumentLoader {
private videoId: string
private language?: string
private addVideoInfo: boolean
// Private field for the transcript format option
private transcriptFormat: 'text' | 'srt'
constructor(config: YoutubeConfig) {
super()
this.videoId = config.videoId
this.language = config?.language
this.addVideoInfo = config?.addVideoInfo ?? false
// Initialize the format option; default to 'text' for backward compatibility
this.transcriptFormat = config?.transcriptFormat ?? 'text'
}
/**
* Extracts the videoId from a YouTube video URL.
* @param url The URL of the YouTube video.
* @returns The videoId of the YouTube video.
*/
private static getVideoID(url: string): string {
const match = url.match(/.*(?:youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=)([^#&?]*).*/)
if (match !== null && match[1].length === 11) {
return match[1]
} else {
throw new Error('Failed to get youtube video id from the url')
}
}
/**
* Creates a new instance of the YoutubeLoader class from a YouTube video
* URL.
* @param url The URL of the YouTube video.
* @param config Optional configuration options for the YoutubeLoader instance, excluding the videoId.
* @returns A new instance of the YoutubeLoader class.
*/
static createFromUrl(url: string, config?: Omit<YoutubeConfig, 'videoId'>): YoutubeLoader {
const videoId = YoutubeLoader.getVideoID(url)
return new YoutubeLoader({ ...config, videoId })
}
/**
* Formats a millisecond offset as an SRT timestamp (HH:MM:SS,ms).
* @param ms The offset in milliseconds.
* @returns The formatted timestamp string.
*/
private static formatTimestamp(ms: number): string {
const totalSeconds = Math.floor(ms / 1000)
const hours = Math.floor(totalSeconds / 3600)
.toString()
.padStart(2, '0')
const minutes = Math.floor((totalSeconds % 3600) / 60)
.toString()
.padStart(2, '0')
const seconds = (totalSeconds % 60).toString().padStart(2, '0')
const milliseconds = (ms % 1000).toString().padStart(3, '0')
return `${hours}:${minutes}:${seconds},${milliseconds}`
}
/**
* Loads the transcript and video metadata from the specified YouTube
* video. It can return the transcript as plain text or in SRT format.
* @returns An array of Documents representing the retrieved data.
*/
async load(): Promise<Document[]> {
const metadata: VideoMetadata = {
source: this.videoId
}
try {
const youtube = await Innertube.create({
lang: this.language,
retrieve_player: false
})
const info = await youtube.getInfo(this.videoId)
const transcriptData = await info.getTranscript()
if (!transcriptData.transcript.content?.body?.initial_segments) {
throw new Error('Transcript segments not found in the response.')
}
const segments = transcriptData.transcript.content.body.initial_segments
let pageContent: string
// Format the transcript according to the transcriptFormat option
if (this.transcriptFormat === 'srt') {
// [changed] Format the transcript segments as SRT
pageContent = segments
.map((segment, index) => {
const srtIndex = index + 1
const startTime = YoutubeLoader.formatTimestamp(Number(segment.start_ms))
const endTime = YoutubeLoader.formatTimestamp(Number(segment.end_ms))
const text = segment.snippet?.text || '' // use segment.snippet.text
return `${srtIndex}\n${startTime} --> ${endTime}\n${text}`
})
.join('\n\n') // separate SRT blocks with a blank line
} else {
// [original logic] Join segments as plain text
pageContent = segments.map((segment) => segment.snippet?.text || '').join(' ')
}
if (this.addVideoInfo) {
const basicInfo = info.basic_info
metadata.description = basicInfo.short_description
metadata.title = basicInfo.title
metadata.view_count = basicInfo.view_count
metadata.author = basicInfo.author
}
const document = new Document({
pageContent,
metadata
})
return [document]
} catch (e: unknown) {
throw new Error(`Failed to get YouTube video transcription: ${(e as Error).message}`)
}
}
}

View File

@ -0,0 +1,236 @@
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
import { EPubLoader } from '@langchain/community/document_loaders/fs/epub'
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf'
import { PPTXLoader } from '@langchain/community/document_loaders/fs/pptx'
import { CheerioWebBaseLoader } from '@langchain/community/document_loaders/web/cheerio'
import { SitemapLoader } from '@langchain/community/document_loaders/web/sitemap'
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { Document } from '@langchain/core/documents'
import { loggerService } from '@logger'
import { UrlSource } from '@main/utils/knowledge'
import { LoaderReturn } from '@shared/config/types'
import { FileMetadata, FileTypes, KnowledgeBaseParams } from '@types'
import { randomUUID } from 'crypto'
import { JSONLoader } from 'langchain/document_loaders/fs/json'
import { TextLoader } from 'langchain/document_loaders/fs/text'
import { SplitterFactory } from '../splitter'
import { MarkdownLoader } from './MarkdownLoader'
import { NoteLoader } from './NoteLoader'
import { YoutubeLoader } from './YoutubeLoader'
const logger = loggerService.withContext('KnowledgeService File Loader')
type LoaderInstance =
| TextLoader
| PDFLoader
| PPTXLoader
| DocxLoader
| JSONLoader
| EPubLoader
| CheerioWebBaseLoader
| YoutubeLoader
| SitemapLoader
| NoteLoader
| MarkdownLoader
/**
* Adds the loader type to each document's metadata.
*/
function formatDocument(docs: Document[], type: string): Document[] {
return docs.map((doc) => ({
...doc,
metadata: {
...doc.metadata,
type: type
}
}))
}
/**
* Splits the documents, writes the chunks to the vector store, and returns the added entry info.
*/
async function processDocuments(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
docs: Document[],
loaderType: string,
splitterType?: string
): Promise<LoaderReturn> {
const formattedDocs = formatDocument(docs, loaderType)
const splitter = SplitterFactory.create({
chunkSize: base.chunkSize,
chunkOverlap: base.chunkOverlap,
...(splitterType && { type: splitterType })
})
const splitterResults = await splitter.splitDocuments(formattedDocs)
const ids = splitterResults.map(() => randomUUID())
await vectorStore.addDocuments(splitterResults, { ids })
return {
entriesAdded: splitterResults.length,
uniqueId: ids[0] || '',
uniqueIds: ids,
loaderType
}
}
/**
* Runs the given loader, processes its documents into the vector store, and returns an empty result if loading or processing fails.
*/
async function executeLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
loaderInstance: LoaderInstance,
loaderType: string,
identifier: string,
splitterType?: string
): Promise<LoaderReturn> {
const emptyResult: LoaderReturn = {
entriesAdded: 0,
uniqueId: '',
uniqueIds: [],
loaderType
}
try {
const docs = await loaderInstance.load()
return await processDocuments(base, vectorStore, docs, loaderType, splitterType)
} catch (error) {
logger.error(`Error loading or processing ${identifier} with loader ${loaderType}: ${error}`)
return emptyResult
}
}
/**
* Maps file extensions to their document loader class and loader type tag.
*/
const FILE_LOADER_MAP: Record<string, { loader: new (path: string) => LoaderInstance; type: string }> = {
'.pdf': { loader: PDFLoader, type: 'pdf' },
'.txt': { loader: TextLoader, type: 'text' },
'.pptx': { loader: PPTXLoader, type: 'pptx' },
'.docx': { loader: DocxLoader, type: 'docx' },
'.doc': { loader: DocxLoader, type: 'doc' },
'.json': { loader: JSONLoader, type: 'json' },
'.epub': { loader: EPubLoader, type: 'epub' },
'.md': { loader: MarkdownLoader, type: 'markdown' }
}
export async function addFileLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
file: FileMetadata
): Promise<LoaderReturn> {
const fileExt = file.ext.toLowerCase()
const loaderConfig = FILE_LOADER_MAP[fileExt]
if (!loaderConfig) {
// Fall back to the plain text loader
const loaderInstance = new TextLoader(file.path)
const type = fileExt.replace('.', '') || 'unknown'
return executeLoader(base, vectorStore, loaderInstance, type, file.path)
}
const loaderInstance = new loaderConfig.loader(file.path)
return executeLoader(base, vectorStore, loaderInstance, loaderConfig.type, file.path)
}
export async function addWebLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
url: string,
source: UrlSource
): Promise<LoaderReturn> {
let loaderInstance: CheerioWebBaseLoader | YoutubeLoader | undefined
let splitterType: string | undefined
switch (source) {
case 'normal':
loaderInstance = new CheerioWebBaseLoader(url)
break
case 'youtube':
loaderInstance = YoutubeLoader.createFromUrl(url, {
addVideoInfo: true,
transcriptFormat: 'srt',
language: 'zh'
})
splitterType = 'srt'
break
}
if (!loaderInstance) {
return {
entriesAdded: 0,
uniqueId: '',
uniqueIds: [],
loaderType: source
}
}
return executeLoader(base, vectorStore, loaderInstance, source, url, splitterType)
}
export async function addSitemapLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
url: string
): Promise<LoaderReturn> {
const loaderInstance = new SitemapLoader(url)
return executeLoader(base, vectorStore, loaderInstance, 'sitemap', url)
}
export async function addNoteLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
content: string,
sourceUrl: string
): Promise<LoaderReturn> {
const loaderInstance = new NoteLoader(content, sourceUrl)
return executeLoader(base, vectorStore, loaderInstance, 'note', sourceUrl)
}
export async function addVideoLoader(
base: KnowledgeBaseParams,
vectorStore: FaissStore,
files: FileMetadata[]
): Promise<LoaderReturn> {
const srtFile = files.find((f) => f.type === FileTypes.TEXT)
const videoFile = files.find((f) => f.type === FileTypes.VIDEO)
const emptyResult: LoaderReturn = {
entriesAdded: 0,
uniqueId: '',
uniqueIds: [],
loaderType: 'video'
}
if (!srtFile || !videoFile) {
return emptyResult
}
try {
const loaderInstance = new TextLoader(srtFile.path)
const originalDocs = await loaderInstance.load()
const docsWithVideoMeta = originalDocs.map(
(doc) =>
new Document({
...doc,
metadata: {
...doc.metadata,
video: {
path: videoFile.path,
name: videoFile.origin_name
}
}
})
)
return await processDocuments(base, vectorStore, docsWithVideoMeta, 'video', 'srt')
} catch (error) {
logger.error(`Error loading or processing file ${srtFile.path} with loader video: ${error}`)
return emptyResult
}
}
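
A minimal sketch of calling addFileLoader from this module; the module path, vector store, knowledge base params, and file metadata are assumptions and only partially filled in:

```typescript
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { addFileLoader } from './index' // assumed module path

declare const vectorStore: FaissStore // created elsewhere with the base's embeddings

const result = await addFileLoader(
  { chunkSize: 1000, chunkOverlap: 100 } as any,       // partial KnowledgeBaseParams (assumed values)
  vectorStore,
  { path: '/path/to/report.pdf', ext: '.pdf' } as any  // partial FileMetadata (assumed values)
)
console.log(`${result.entriesAdded} chunks added via the ${result.loaderType} loader`)
```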

View File

@ -0,0 +1,55 @@
import { BM25Retriever } from '@langchain/community/retrievers/bm25'
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { BaseRetriever } from '@langchain/core/retrievers'
import { loggerService } from '@main/services/LoggerService'
import { type KnowledgeBaseParams } from '@types'
import { type Document } from 'langchain/document'
import { EnsembleRetriever } from 'langchain/retrievers/ensemble'
const logger = loggerService.withContext('RetrieverFactory')
export class RetrieverFactory {
/**
* Creates a LangChain retriever for the knowledge base.
* @param base Knowledge base parameters (retriever mode, weight, document count)
* @param vectorStore FAISS vector store used for vector search
* @param documents Documents used to build the BM25Retriever for bm25/hybrid modes
* @returns A BaseRetriever instance
*/
public createRetriever(base: KnowledgeBaseParams, vectorStore: FaissStore, documents: Document[]): BaseRetriever {
const retrieverType = base.retriever?.mode ?? 'hybrid'
const retrieverWeight = base.retriever?.weight ?? 0.5
const searchK = base.documentCount ?? 5
logger.info(`Creating retriever of type: ${retrieverType} with k=${searchK}`)
switch (retrieverType) {
case 'bm25':
if (documents.length === 0) {
throw new Error('BM25Retriever requires documents, but none were provided or found.')
}
logger.info('Create BM25 Retriever')
return BM25Retriever.fromDocuments(documents, { k: searchK })
case 'hybrid': {
if (documents.length === 0) {
logger.warn('No documents provided for BM25 part of hybrid search. Falling back to vector search only.')
return vectorStore.asRetriever(searchK)
}
const vectorstoreRetriever = vectorStore.asRetriever(searchK)
const bm25Retriever = BM25Retriever.fromDocuments(documents, { k: searchK })
logger.info('Create Hybrid Retriever')
return new EnsembleRetriever({
retrievers: [bm25Retriever, vectorstoreRetriever],
weights: [retrieverWeight, 1 - retrieverWeight]
})
}
case 'vector':
default:
logger.info('Create Vector Retriever')
return vectorStore.asRetriever(searchK)
}
}
}
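
A sketch of building a hybrid retriever with the factory above; the store, documents, and params are assumed to come from the knowledge base and are only partially filled in:

```typescript
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import { Document } from '@langchain/core/documents'
import { RetrieverFactory } from './RetrieverFactory'

declare const vectorStore: FaissStore
declare const allDocuments: Document[]

const retriever = new RetrieverFactory().createRetriever(
  { retriever: { mode: 'hybrid', weight: 0.5 }, documentCount: 5 } as any, // partial KnowledgeBaseParams
  vectorStore,
  allDocuments
)
const hits = await retriever.invoke('how are SRT subtitles chunked?')
console.log(hits.length)
```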

View File

@ -0,0 +1,133 @@
import { Document } from '@langchain/core/documents'
import { TextSplitter, TextSplitterParams } from 'langchain/text_splitter'
// A single parsed subtitle segment
interface SrtSegment {
text: string
startTime: number // in seconds
endTime: number // in seconds
}
// Helper: converts an SRT timestamp string (HH:MM:SS,ms) into seconds
function srtTimeToSeconds(time: string): number {
const parts = time.split(':')
const secondsAndMs = parts[2].split(',')
const hours = parseInt(parts[0], 10)
const minutes = parseInt(parts[1], 10)
const seconds = parseInt(secondsAndMs[0], 10)
const milliseconds = parseInt(secondsAndMs[1], 10)
return hours * 3600 + minutes * 60 + seconds + milliseconds / 1000
}
export class SrtSplitter extends TextSplitter {
constructor(fields?: Partial<TextSplitterParams>) {
// Pass chunkSize and chunkOverlap through to the base class
super(fields)
}
splitText(): Promise<string[]> {
throw new Error('Method not implemented.')
}
// Core method: override splitDocuments with custom chunking logic
async splitDocuments(documents: Document[]): Promise<Document[]> {
const allChunks: Document[] = []
for (const doc of documents) {
// 1. Parse the SRT content
const segments = this.parseSrt(doc.pageContent)
if (segments.length === 0) continue
// 2. Merge subtitle segments into chunks
const chunks = this.mergeSegmentsIntoChunks(segments, doc.metadata)
allChunks.push(...chunks)
}
return allChunks
}
// Helper: parse the full SRT string
private parseSrt(srt: string): SrtSegment[] {
const segments: SrtSegment[] = []
const blocks = srt.trim().split(/\n\n/)
for (const block of blocks) {
const lines = block.split('\n')
if (lines.length < 3) continue
const timeMatch = lines[1].match(/(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})/)
if (!timeMatch) continue
const startTime = srtTimeToSeconds(timeMatch[1])
const endTime = srtTimeToSeconds(timeMatch[2])
const text = lines.slice(2).join(' ').trim()
segments.push({ text, startTime, endTime })
}
return segments
}
// Helper: merge parsed segments into chunks of five segments each
private mergeSegmentsIntoChunks(segments: SrtSegment[], baseMetadata: Record<string, any>): Document[] {
const chunks: Document[] = []
let currentChunkText = ''
let currentChunkStartTime = 0
let currentChunkEndTime = 0
let segmentCount = 0
for (const segment of segments) {
if (segmentCount === 0) {
currentChunkStartTime = segment.startTime
}
currentChunkText += (currentChunkText ? ' ' : '') + segment.text
currentChunkEndTime = segment.endTime
segmentCount++
// Once five segments have accumulated, create a new Document
if (segmentCount === 5) {
const metadata: Record<string, any> = {
...baseMetadata,
startTime: currentChunkStartTime,
endTime: currentChunkEndTime
}
if (baseMetadata.source_url) {
metadata.source_url_with_timestamp = `${baseMetadata.source_url}?t=${Math.floor(currentChunkStartTime)}s`
}
chunks.push(
new Document({
pageContent: currentChunkText,
metadata
})
)
// Reset the counter and temporary variables
currentChunkText = ''
currentChunkStartTime = 0
currentChunkEndTime = 0
segmentCount = 0
}
}
// Create a final Document from any remaining segments
if (segmentCount > 0) {
const metadata: Record<string, any> = {
...baseMetadata,
startTime: currentChunkStartTime,
endTime: currentChunkEndTime
}
if (baseMetadata.source_url) {
metadata.source_url_with_timestamp = `${baseMetadata.source_url}?t=${Math.floor(currentChunkStartTime)}s`
}
chunks.push(
new Document({
pageContent: currentChunkText,
metadata
})
)
}
return chunks
}
}
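
A small sketch showing how the splitter above chunks an SRT transcript; the video URL is a placeholder:

```typescript
import { Document } from '@langchain/core/documents'
import { SrtSplitter } from './SrtSplitter'

const srt = `1
00:00:01,000 --> 00:00:03,000
Hello and welcome.

2
00:00:03,500 --> 00:00:05,000
Today we look at embeddings.`

const splitter = new SrtSplitter()
const chunks = await splitter.splitDocuments([
  new Document({ pageContent: srt, metadata: { source_url: 'https://youtu.be/VIDEO_ID' } })
])
// Each chunk groups up to five cues, keeps startTime/endTime in seconds,
// and adds source_url_with_timestamp when a source_url is present.
console.log(chunks[0].metadata)
```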

View File

@ -0,0 +1,31 @@
import { RecursiveCharacterTextSplitter, TextSplitter } from '@langchain/textsplitters'
import { SrtSplitter } from './SrtSplitter'
export type SplitterConfig = {
chunkSize?: number
chunkOverlap?: number
type?: 'recursive' | 'srt' | string
}
export class SplitterFactory {
/**
* Creates a TextSplitter instance based on the provided configuration.
* @param config - The configuration object specifying the splitter type and its parameters.
* @returns An instance of a TextSplitter, or null if no splitting is required.
*/
public static create(config: SplitterConfig): TextSplitter {
switch (config.type) {
case 'srt':
return new SrtSplitter({
chunkSize: config.chunkSize,
chunkOverlap: config.chunkOverlap
})
case 'recursive':
default:
return new RecursiveCharacterTextSplitter({
chunkSize: config.chunkSize,
chunkOverlap: config.chunkOverlap
})
}
}
}

View File

@ -0,0 +1,63 @@
import PreprocessProvider from '@main/knowledge/preprocess/PreprocessProvider'
import { loggerService } from '@main/services/LoggerService'
import { windowService } from '@main/services/WindowService'
import type { FileMetadata, KnowledgeBaseParams, KnowledgeItem } from '@types'
const logger = loggerService.withContext('PreprocessingService')
class PreprocessingService {
public async preprocessFile(
file: FileMetadata,
base: KnowledgeBaseParams,
item: KnowledgeItem,
userId: string
): Promise<FileMetadata> {
let fileToProcess: FileMetadata = file
// Check if preprocessing is configured and applicable (e.g., for PDFs)
if (base.preprocessProvider && file.ext.toLowerCase() === '.pdf') {
try {
const provider = new PreprocessProvider(base.preprocessProvider.provider, userId)
// Check if file has already been preprocessed
const alreadyProcessed = await provider.checkIfAlreadyProcessed(file)
if (alreadyProcessed) {
logger.debug(`File already preprocessed, using cached result: ${file.path}`)
return alreadyProcessed
}
// Execute preprocessing
logger.debug(`Starting preprocess for scanned PDF: ${file.path}`)
const { processedFile, quota } = await provider.parseFile(item.id, file)
fileToProcess = processedFile
// Notify the UI
const mainWindow = windowService.getMainWindow()
mainWindow?.webContents.send('file-preprocess-finished', {
itemId: item.id,
quota: quota
})
} catch (err) {
logger.error(`Preprocessing failed: ${err}`)
// If preprocessing fails, re-throw the error to be handled by the caller
throw new Error(`Preprocessing failed: ${err}`)
}
}
return fileToProcess
}
public async checkQuota(base: KnowledgeBaseParams, userId: string): Promise<number> {
try {
if (base.preprocessProvider && base.preprocessProvider.type === 'preprocess') {
const provider = new PreprocessProvider(base.preprocessProvider.provider, userId)
return await provider.checkQuota()
}
throw new Error('No preprocess provider configured')
} catch (err) {
logger.error(`Failed to check quota: ${err}`)
throw new Error(`Failed to check quota: ${err}`)
}
}
}
export const preprocessingService = new PreprocessingService()

View File

@ -1,101 +1,46 @@
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { KnowledgeBaseParams } from '@types'
import { DEFAULT_DOCUMENT_COUNT, DEFAULT_RELEVANT_SCORE } from '@main/utils/knowledge'
import { KnowledgeBaseParams, KnowledgeSearchResult } from '@types'
import { MultiModalDocument, RerankStrategy } from './strategies/RerankStrategy'
import { StrategyFactory } from './strategies/StrategyFactory'
export default abstract class BaseReranker {
protected base: KnowledgeBaseParams
protected strategy: RerankStrategy
constructor(base: KnowledgeBaseParams) {
if (!base.rerankApiClient) {
throw new Error('Rerank model is required')
}
this.base = base
this.strategy = StrategyFactory.createStrategy(base.rerankApiClient.provider)
}
abstract rerank(query: string, searchResults: ExtractChunkData[]): Promise<ExtractChunkData[]>
/**
* Get Rerank Request Url
*/
protected getRerankUrl() {
if (this.base.rerankApiClient?.provider === 'bailian') {
return 'https://dashscope.aliyuncs.com/api/v1/services/rerank/text-rerank/text-rerank'
}
let baseURL = this.base.rerankApiClient?.baseURL
if (baseURL && baseURL.endsWith('/')) {
// A trailing `/` forces use of the rerank base URL as-is
return `${baseURL}rerank`
}
if (baseURL && !baseURL.endsWith('/v1')) {
baseURL = `${baseURL}/v1`
}
return `${baseURL}/rerank`
abstract rerank(query: string, searchResults: KnowledgeSearchResult[]): Promise<KnowledgeSearchResult[]>
protected getRerankUrl(): string {
return this.strategy.buildUrl(this.base.rerankApiClient?.baseURL)
}
/**
* Get Rerank Request Body
*/
protected getRerankRequestBody(query: string, searchResults: ExtractChunkData[]) {
const provider = this.base.rerankApiClient?.provider
const documents = searchResults.map((doc) => doc.pageContent)
const topN = this.base.documentCount
if (provider === 'voyageai') {
return {
model: this.base.rerankApiClient?.model,
query,
documents,
top_k: topN
}
} else if (provider === 'bailian') {
return {
model: this.base.rerankApiClient?.model,
input: {
query,
documents
},
parameters: {
top_n: topN
}
}
} else if (provider?.includes('tei')) {
return {
query,
texts: documents,
return_text: true
}
} else {
return {
model: this.base.rerankApiClient?.model,
query,
documents,
top_n: topN
}
}
protected getRerankRequestBody(query: string, searchResults: KnowledgeSearchResult[]) {
const documents = this.buildDocuments(searchResults)
const topN = this.base.documentCount ?? DEFAULT_DOCUMENT_COUNT
const model = this.base.rerankApiClient?.model
return this.strategy.buildRequestBody(query, documents, topN, model)
}
private buildDocuments(searchResults: KnowledgeSearchResult[]): MultiModalDocument[] {
return searchResults.map((doc) => {
const document: MultiModalDocument = {}
/**
* Extract Rerank Result
*/
// If the chunk is an image, attach image content; otherwise treat it as text
if (doc.metadata?.type === 'image') {
document.image = doc.pageContent
} else {
document.text = doc.pageContent
}
return document
})
}
protected extractRerankResult(data: any) {
const provider = this.base.rerankApiClient?.provider
if (provider === 'bailian') {
return data.output.results
} else if (provider === 'voyageai') {
return data.data
} else if (provider?.includes('tei')) {
return data.map((item: any) => {
return {
index: item.index,
relevance_score: item.score
}
})
} else {
return data.results
}
return this.strategy.extractResults(data)
}
/**
@ -105,35 +50,30 @@ export default abstract class BaseReranker {
* @protected
*/
protected getRerankResult(
searchResults: ExtractChunkData[],
rerankResults: Array<{
index: number
relevance_score: number
}>
searchResults: KnowledgeSearchResult[],
rerankResults: Array<{ index: number; relevance_score: number }>
) {
const resultMap = new Map(rerankResults.map((result) => [result.index, result.relevance_score || 0]))
const resultMap = new Map(
rerankResults.map((result) => [result.index, result.relevance_score || DEFAULT_RELEVANT_SCORE])
)
return searchResults
.map((doc: ExtractChunkData, index: number) => {
const returnResults = searchResults
.map((doc: KnowledgeSearchResult, index: number) => {
const score = resultMap.get(index)
if (score === undefined) return undefined
return {
...doc,
score
}
return { ...doc, score }
})
.filter((doc): doc is ExtractChunkData => doc !== undefined)
.filter((doc): doc is KnowledgeSearchResult => doc !== undefined)
.sort((a, b) => b.score - a.score)
}
return returnResults
}
public defaultHeaders() {
return {
Authorization: `Bearer ${this.base.rerankApiClient?.apiKey}`,
'Content-Type': 'application/json'
}
}
protected formatErrorMessage(url: string, error: any, requestBody: any) {
const errorDetails = {
url: url,

View File

@ -1,19 +1,14 @@
import { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { KnowledgeBaseParams } from '@types'
import { KnowledgeBaseParams, KnowledgeSearchResult } from '@types'
import { net } from 'electron'
import BaseReranker from './BaseReranker'
export default class GeneralReranker extends BaseReranker {
constructor(base: KnowledgeBaseParams) {
super(base)
}
public rerank = async (query: string, searchResults: ExtractChunkData[]): Promise<ExtractChunkData[]> => {
public rerank = async (query: string, searchResults: KnowledgeSearchResult[]): Promise<KnowledgeSearchResult[]> => {
const url = this.getRerankUrl()
const requestBody = this.getRerankRequestBody(query, searchResults)
try {
const response = await net.fetch(url, {
method: 'POST',

View File

@ -1,5 +1,4 @@
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { KnowledgeBaseParams } from '@types'
import { KnowledgeBaseParams, KnowledgeSearchResult } from '@types'
import GeneralReranker from './GeneralReranker'
@ -8,7 +7,7 @@ export default class Reranker {
constructor(base: KnowledgeBaseParams) {
this.sdk = new GeneralReranker(base)
}
public async rerank(query: string, searchResults: ExtractChunkData[]): Promise<ExtractChunkData[]> {
public async rerank(query: string, searchResults: KnowledgeSearchResult[]): Promise<KnowledgeSearchResult[]> {
return this.sdk.rerank(query, searchResults)
}
}

View File

@ -0,0 +1,18 @@
import { MultiModalDocument, RerankStrategy } from './RerankStrategy'
export class BailianStrategy implements RerankStrategy {
buildUrl(): string {
return 'https://dashscope.aliyuncs.com/api/v1/services/rerank/text-rerank/text-rerank'
}
buildRequestBody(query: string, documents: MultiModalDocument[], topN: number, model?: string) {
const textDocuments = documents.filter((d) => d.text).map((d) => d.text!)
return {
model,
input: { query, documents: textDocuments },
parameters: { top_n: topN }
}
}
extractResults(data: any) {
return data.output.results
}
}

View File

@ -0,0 +1,25 @@
import { MultiModalDocument, RerankStrategy } from './RerankStrategy'
export class DefaultStrategy implements RerankStrategy {
buildUrl(baseURL?: string): string {
if (baseURL && baseURL.endsWith('/')) {
return `${baseURL}rerank`
}
if (baseURL && !baseURL.endsWith('/v1')) {
baseURL = `${baseURL}/v1`
}
return `${baseURL}/rerank`
}
buildRequestBody(query: string, documents: MultiModalDocument[], topN: number, model?: string) {
const textDocuments = documents.filter((d) => d.text).map((d) => d.text!)
return {
model,
query,
documents: textDocuments,
top_n: topN
}
}
extractResults(data: any) {
return data.results
}
}

View File

@ -0,0 +1,33 @@
import { MultiModalDocument, RerankStrategy } from './RerankStrategy'
export class JinaStrategy implements RerankStrategy {
buildUrl(baseURL?: string): string {
if (baseURL && baseURL.endsWith('/')) {
return `${baseURL}rerank`
}
if (baseURL && !baseURL.endsWith('/v1')) {
baseURL = `${baseURL}/v1`
}
return `${baseURL}/rerank`
}
buildRequestBody(query: string, documents: MultiModalDocument[], topN: number, model?: string) {
if (model === 'jina-reranker-m0') {
return {
model,
query,
documents,
top_n: topN
}
}
const textDocuments = documents.filter((d) => d.text).map((d) => d.text!)
return {
model,
query,
documents: textDocuments,
top_n: topN
}
}
extractResults(data: any) {
return data.results
}
}

View File

@ -0,0 +1,9 @@
export interface MultiModalDocument {
text?: string
image?: string
}
export interface RerankStrategy {
buildUrl(baseURL?: string): string
buildRequestBody(query: string, documents: MultiModalDocument[], topN: number, model?: string): any
extractResults(data: any): Array<{ index: number; relevance_score: number }>
}

View File

@ -0,0 +1,25 @@
import { BailianStrategy } from './BailianStrategy'
import { DefaultStrategy } from './DefaultStrategy'
import { JinaStrategy } from './JinaStrategy'
import { RerankStrategy } from './RerankStrategy'
import { TEIStrategy } from './TeiStrategy'
import { isTEIProvider, RERANKER_PROVIDERS } from './types'
import { VoyageAIStrategy } from './VoyageStrategy'
export class StrategyFactory {
static createStrategy(provider?: string): RerankStrategy {
switch (provider) {
case RERANKER_PROVIDERS.VOYAGEAI:
return new VoyageAIStrategy()
case RERANKER_PROVIDERS.BAILIAN:
return new BailianStrategy()
case RERANKER_PROVIDERS.JINA:
return new JinaStrategy()
default:
if (isTEIProvider(provider)) {
return new TEIStrategy()
}
return new DefaultStrategy()
}
}
}
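
A sketch of how a provider id maps to a strategy and what the resulting request looks like; the model name is an assumption:

```typescript
import { StrategyFactory } from './StrategyFactory'

const strategy = StrategyFactory.createStrategy('jina')
const url = strategy.buildUrl('https://api.jina.ai/v1/') // trailing '/' -> https://api.jina.ai/v1/rerank
const body = strategy.buildRequestBody(
  'what is a hybrid retriever?',
  [{ text: 'A hybrid retriever combines BM25 and vector search.' }],
  5,
  'jina-reranker-v2-base-multilingual' // assumed model name
)
console.log(url, body)
```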

View File

@ -0,0 +1,26 @@
import { MultiModalDocument, RerankStrategy } from './RerankStrategy'
export class TEIStrategy implements RerankStrategy {
buildUrl(baseURL?: string): string {
if (baseURL && baseURL.endsWith('/')) {
return `${baseURL}rerank`
}
if (baseURL && !baseURL.endsWith('/v1')) {
baseURL = `${baseURL}/v1`
}
return `${baseURL}/rerank`
}
buildRequestBody(query: string, documents: MultiModalDocument[]) {
const textDocuments = documents.filter((d) => d.text).map((d) => d.text!)
return {
query,
texts: textDocuments,
return_text: true
}
}
extractResults(data: any) {
return data.map((item: any) => ({
index: item.index,
relevance_score: item.score
}))
}
}

View File

@ -0,0 +1,24 @@
import { MultiModalDocument, RerankStrategy } from './RerankStrategy'
export class VoyageAIStrategy implements RerankStrategy {
buildUrl(baseURL?: string): string {
if (baseURL && baseURL.endsWith('/')) {
return `${baseURL}rerank`
}
if (baseURL && !baseURL.endsWith('/v1')) {
baseURL = `${baseURL}/v1`
}
return `${baseURL}/rerank`
}
buildRequestBody(query: string, documents: MultiModalDocument[], topN: number, model?: string) {
const textDocuments = documents.filter((d) => d.text).map((d) => d.text!)
return {
model,
query,
documents: textDocuments,
top_k: topN
}
}
extractResults(data: any) {
return data.data
}
}

View File

@ -0,0 +1,19 @@
import { objectValues } from '@types'
export const RERANKER_PROVIDERS = {
VOYAGEAI: 'voyageai',
BAILIAN: 'bailian',
JINA: 'jina',
TEI: 'tei'
} as const
export type RerankProvider = (typeof RERANKER_PROVIDERS)[keyof typeof RERANKER_PROVIDERS]
export function isTEIProvider(provider?: string): boolean {
return provider?.includes(RERANKER_PROVIDERS.TEI) ?? false
}
export function isKnownProvider(provider?: string): provider is RerankProvider {
if (!provider) return false
return objectValues(RERANKER_PROVIDERS).some((p) => p === provider)
}

View File

@ -56,6 +56,45 @@ type CallToolArgs = { server: MCPServer; name: string; args: any; callId?: strin
const logger = loggerService.withContext('MCPService')
// Redact potentially sensitive fields in objects (headers, tokens, api keys)
function redactSensitive(input: any): any {
const SENSITIVE_KEYS = ['authorization', 'Authorization', 'apiKey', 'api_key', 'apikey', 'token', 'access_token']
const MAX_STRING = 300
const redact = (val: any): any => {
if (val == null) return val
if (typeof val === 'string') {
return val.length > MAX_STRING ? `${val.slice(0, MAX_STRING)}…<${val.length - MAX_STRING} more>` : val
}
if (Array.isArray(val)) return val.map((v) => redact(v))
if (typeof val === 'object') {
const out: Record<string, any> = {}
for (const [k, v] of Object.entries(val)) {
if (SENSITIVE_KEYS.includes(k)) {
out[k] = '<redacted>'
} else {
out[k] = redact(v)
}
}
return out
}
return val
}
return redact(input)
}
// Create a context-aware logger for a server
function getServerLogger(server: MCPServer, extra?: Record<string, any>) {
const base = {
serverName: server?.name,
serverId: server?.id,
baseUrl: server?.baseUrl,
type: server?.type || (server?.command ? 'stdio' : server?.baseUrl ? 'http' : 'inmemory')
}
return loggerService.withContext('MCPService', { ...base, ...(extra || {}) })
}
/**
* Higher-order function to add caching capability to any async function
* @param fn The original function to be wrapped with caching
@ -74,15 +113,17 @@ function withCache<T extends unknown[], R>(
const cacheKey = getCacheKey(...args)
if (CacheService.has(cacheKey)) {
logger.debug(`${logPrefix} loaded from cache`)
logger.debug(`${logPrefix} loaded from cache`, { cacheKey })
const cachedData = CacheService.get<R>(cacheKey)
if (cachedData) {
return cachedData
}
}
const start = Date.now()
const result = await fn(...args)
CacheService.set(cacheKey, result, ttl)
logger.debug(`${logPrefix} cached`, { cacheKey, ttlMs: ttl, durationMs: Date.now() - start })
return result
}
}
@ -128,6 +169,7 @@ class McpService {
// If there's a pending initialization, wait for it
const pendingClient = this.pendingClients.get(serverKey)
if (pendingClient) {
getServerLogger(server).silly(`Waiting for pending client initialization`)
return pendingClient
}
@ -136,8 +178,11 @@ class McpService {
if (existingClient) {
try {
// Check if the existing client is still connected
const pingResult = await existingClient.ping()
logger.debug(`Ping result for ${server.name}:`, pingResult)
const pingResult = await existingClient.ping({
// add short timeout to prevent hanging
timeout: 1000
})
getServerLogger(server).debug(`Ping result`, { ok: !!pingResult })
// If the ping fails, remove the client from the cache
// and create a new one
if (!pingResult) {
@ -146,7 +191,7 @@ class McpService {
return existingClient
}
} catch (error: any) {
logger.error(`Error pinging server ${server.name}:`, error?.message)
getServerLogger(server).error(`Error pinging server`, error as Error)
this.clients.delete(serverKey)
}
}
@ -172,15 +217,15 @@ class McpService {
> => {
// Create appropriate transport based on configuration
if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
logger.debug(`Using in-memory transport for server: ${server.name}`)
getServerLogger(server).debug(`Using in-memory transport`)
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
// start the in-memory server with the given name and environment variables
const inMemoryServer = createInMemoryMCPServer(server.name, args, server.env || {})
try {
await inMemoryServer.connect(serverTransport)
logger.debug(`In-memory server started: ${server.name}`)
getServerLogger(server).debug(`In-memory server started`)
} catch (error: Error | any) {
logger.error(`Error starting in-memory server: ${error}`)
getServerLogger(server).error(`Error starting in-memory server`, error as Error)
throw new Error(`Failed to start in-memory server: ${error.message}`)
}
// set the client transport to the client
@ -193,7 +238,10 @@ class McpService {
},
authProvider
}
logger.debug(`StreamableHTTPClientTransport options:`, options)
// redact headers before logging
getServerLogger(server).debug(`StreamableHTTPClientTransport options`, {
options: redactSensitive(options)
})
return new StreamableHTTPClientTransport(new URL(server.baseUrl!), options)
} else if (server.type === 'sse') {
const options: SSEClientTransportOptions = {
@ -209,7 +257,7 @@ class McpService {
headers['Authorization'] = `Bearer ${tokens.access_token}`
}
} catch (error) {
logger.error('Failed to fetch tokens:', error as Error)
getServerLogger(server).error('Failed to fetch tokens:', error as Error)
}
}
@ -239,15 +287,18 @@ class McpService {
...server.env,
...resolvedConfig.env
}
logger.debug(`Using resolved DXT config - command: ${cmd}, args: ${args?.join(' ')}`)
getServerLogger(server).debug(`Using resolved DXT config`, {
command: cmd,
args
})
} else {
logger.warn(`Failed to resolve DXT config for ${server.name}, falling back to manifest values`)
getServerLogger(server).warn(`Failed to resolve DXT config, falling back to manifest values`)
}
}
if (server.command === 'npx') {
cmd = await getBinaryPath('bun')
logger.debug(`Using command: ${cmd}`)
getServerLogger(server).debug(`Using command`, { command: cmd })
// add -x to args if args exist
if (args && args.length > 0) {
@ -282,7 +333,7 @@ class McpService {
}
}
logger.debug(`Starting server with command: ${cmd} ${args ? args.join(' ') : ''}`)
getServerLogger(server).debug(`Starting server`, { command: cmd, args })
// Logger.info(`[MCP] Environment variables for server:`, server.env)
const loginShellEnv = await this.getLoginShellEnv()
@ -304,12 +355,14 @@ class McpService {
// For DXT servers, set the working directory to the extracted path
if (server.dxtPath) {
transportOptions.cwd = server.dxtPath
logger.debug(`Setting working directory for DXT server: ${server.dxtPath}`)
getServerLogger(server).debug(`Setting working directory for DXT server`, {
cwd: server.dxtPath
})
}
const stdioTransport = new StdioClientTransport(transportOptions)
stdioTransport.stderr?.on('data', (data) =>
logger.debug(`Stdio stderr for server: ${server.name}` + data.toString())
getServerLogger(server).debug(`Stdio stderr`, { data: data.toString() })
)
return stdioTransport
} else {
@ -318,7 +371,7 @@ class McpService {
}
const handleAuth = async (client: Client, transport: SSEClientTransport | StreamableHTTPClientTransport) => {
logger.debug(`Starting OAuth flow for server: ${server.name}`)
getServerLogger(server).debug(`Starting OAuth flow`)
// Create an event emitter for the OAuth callback
const events = new EventEmitter()
@ -331,27 +384,27 @@ class McpService {
// Set a timeout to close the callback server
const timeoutId = setTimeout(() => {
logger.warn(`OAuth flow timed out for server: ${server.name}`)
getServerLogger(server).warn(`OAuth flow timed out`)
callbackServer.close()
}, 300000) // 5 minutes timeout
try {
// Wait for the authorization code
const authCode = await callbackServer.waitForAuthCode()
logger.debug(`Received auth code: ${authCode}`)
getServerLogger(server).debug(`Received auth code`)
// Complete the OAuth flow
await transport.finishAuth(authCode)
logger.debug(`OAuth flow completed for server: ${server.name}`)
getServerLogger(server).debug(`OAuth flow completed`)
const newTransport = await initTransport()
// Try to connect again
await client.connect(newTransport)
logger.debug(`Successfully authenticated with server: ${server.name}`)
getServerLogger(server).debug(`Successfully authenticated`)
} catch (oauthError) {
logger.error(`OAuth authentication failed for server ${server.name}:`, oauthError as Error)
getServerLogger(server).error(`OAuth authentication failed`, oauthError as Error)
throw new Error(
`OAuth authentication failed: ${oauthError instanceof Error ? oauthError.message : String(oauthError)}`
)
@ -390,7 +443,7 @@ class McpService {
logger.debug(`Activated server: ${server.name}`)
return client
} catch (error: any) {
logger.error(`Error activating server ${server.name}:`, error?.message)
getServerLogger(server).error(`Error activating server`, error as Error)
throw new Error(`[MCP] Error activating server ${server.name}: ${error.message}`)
}
} finally {
@ -450,9 +503,9 @@ class McpService {
logger.debug(`Message from server ${server.name}:`, notification.params)
})
logger.debug(`Set up notification handlers for server: ${server.name}`)
getServerLogger(server).debug(`Set up notification handlers`)
} catch (error) {
logger.error(`Failed to set up notification handlers for server ${server.name}:`, error as Error)
getServerLogger(server).error(`Failed to set up notification handlers`, error as Error)
}
}
@ -470,7 +523,7 @@ class McpService {
CacheService.remove(`mcp:list_tool:${serverKey}`)
CacheService.remove(`mcp:list_prompts:${serverKey}`)
CacheService.remove(`mcp:list_resources:${serverKey}`)
logger.debug(`Cleared all caches for server: ${serverKey}`)
logger.debug(`Cleared all caches for server`, { serverKey })
}
async closeClient(serverKey: string) {
@ -478,18 +531,18 @@ class McpService {
if (client) {
// Remove the client from the cache
await client.close()
logger.debug(`Closed server: ${serverKey}`)
logger.debug(`Closed server`, { serverKey })
this.clients.delete(serverKey)
// Clear all caches for this server
this.clearServerCache(serverKey)
} else {
logger.warn(`No client found for server: ${serverKey}`)
logger.warn(`No client found for server`, { serverKey })
}
}
async stopServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
const serverKey = this.getServerKey(server)
logger.debug(`Stopping server: ${server.name}`)
getServerLogger(server).debug(`Stopping server`)
await this.closeClient(serverKey)
}
@ -505,16 +558,16 @@ class McpService {
try {
const cleaned = this.dxtService.cleanupDxtServer(server.name)
if (cleaned) {
logger.debug(`Cleaned up DXT server directory for: ${server.name}`)
getServerLogger(server).debug(`Cleaned up DXT server directory`)
}
} catch (error) {
logger.error(`Failed to cleanup DXT server: ${server.name}`, error as Error)
getServerLogger(server).error(`Failed to cleanup DXT server`, error as Error)
}
}
}
async restartServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
logger.debug(`Restarting server: ${server.name}`)
getServerLogger(server).debug(`Restarting server`)
const serverKey = this.getServerKey(server)
await this.closeClient(serverKey)
// Clear cache before restarting to ensure fresh data
@ -527,7 +580,7 @@ class McpService {
try {
await this.closeClient(key)
} catch (error: any) {
logger.error(`Failed to close client: ${error?.message}`)
logger.error(`Failed to close client`, error as Error)
}
}
}
@ -536,9 +589,9 @@ class McpService {
* Check connectivity for an MCP server
*/
public async checkMcpConnectivity(_: Electron.IpcMainInvokeEvent, server: MCPServer): Promise<boolean> {
logger.debug(`Checking connectivity for server: ${server.name}`)
getServerLogger(server).debug(`Checking connectivity`)
try {
logger.debug(`About to call initClient for server: ${server.name}`, { hasInitClient: !!this.initClient })
getServerLogger(server).debug(`About to call initClient`, { hasInitClient: !!this.initClient })
if (!this.initClient) {
throw new Error('initClient method is not available')
@ -547,10 +600,10 @@ class McpService {
const client = await this.initClient(server)
// Attempt to list tools as a way to check connectivity
await client.listTools()
logger.debug(`Connectivity check successful for server: ${server.name}`)
getServerLogger(server).debug(`Connectivity check successful`)
return true
} catch (error) {
logger.error(`Connectivity check failed for server: ${server.name}`, error as Error)
getServerLogger(server).error(`Connectivity check failed`, error as Error)
// Close the client if connectivity check fails to ensure a clean state for the next attempt
const serverKey = this.getServerKey(server)
await this.closeClient(serverKey)
@ -559,9 +612,8 @@ class McpService {
}
private async listToolsImpl(server: MCPServer): Promise<MCPTool[]> {
logger.debug(`Listing tools for server: ${server.name}`)
getServerLogger(server).debug(`Listing tools`)
const client = await this.initClient(server)
logger.debug(`Client for server: ${server.name}`, client)
try {
const { tools } = await client.listTools()
const serverTools: MCPTool[] = []
@ -577,7 +629,7 @@ class McpService {
})
return serverTools
} catch (error: any) {
logger.error(`Failed to list tools for server: ${server.name}`, error?.message)
getServerLogger(server).error(`Failed to list tools`, error as Error)
return []
}
}
@ -614,12 +666,16 @@ class McpService {
const callToolFunc = async ({ server, name, args }: CallToolArgs) => {
try {
logger.debug(`Calling: ${server.name} ${name} ${JSON.stringify(args)} callId: ${toolCallId}`, server)
getServerLogger(server, { tool: name, callId: toolCallId }).debug(`Calling tool`, {
args: redactSensitive(args)
})
if (typeof args === 'string') {
try {
args = JSON.parse(args)
} catch (e) {
logger.error('args parse error', args)
getServerLogger(server, { tool: name, callId: toolCallId }).error('args parse error', e as Error, {
args
})
}
if (args === '') {
args = {}
@ -628,8 +684,9 @@ class McpService {
const client = await this.initClient(server)
const result = await client.callTool({ name, arguments: args }, undefined, {
onprogress: (process) => {
logger.debug(`Progress: ${process.progress / (process.total || 1)}`)
logger.debug(`Progress notification received for server: ${server.name}`, process)
getServerLogger(server, { tool: name, callId: toolCallId }).debug(`Progress`, {
ratio: process.progress / (process.total || 1)
})
const mainWindow = windowService.getMainWindow()
if (mainWindow) {
mainWindow.webContents.send('mcp-progress', process.progress / (process.total || 1))
@ -644,7 +701,7 @@ class McpService {
})
return result as MCPCallToolResponse
} catch (error) {
logger.error(`Error calling tool ${name} on ${server.name}:`, error as Error)
getServerLogger(server, { tool: name, callId: toolCallId }).error(`Error calling tool`, error as Error)
throw error
} finally {
this.activeToolCalls.delete(toolCallId)
@ -668,7 +725,7 @@ class McpService {
*/
private async listPromptsImpl(server: MCPServer): Promise<MCPPrompt[]> {
const client = await this.initClient(server)
logger.debug(`Listing prompts for server: ${server.name}`)
getServerLogger(server).debug(`Listing prompts`)
try {
const { prompts } = await client.listPrompts()
return prompts.map((prompt: any) => ({
@ -680,7 +737,7 @@ class McpService {
} catch (error: any) {
// -32601 is the code for the method not found
if (error?.code !== -32601) {
logger.error(`Failed to list prompts for server: ${server.name}`, error?.message)
getServerLogger(server).error(`Failed to list prompts`, error as Error)
}
return []
}
@ -749,7 +806,7 @@ class McpService {
} catch (error: any) {
// -32601 is the code for the method not found
if (error?.code !== -32601) {
logger.error(`Failed to list resources for server: ${server.name}`, error?.message)
getServerLogger(server).error(`Failed to list resources`, error as Error)
}
return []
}
@ -775,7 +832,7 @@ class McpService {
* Get a specific resource from an MCP server (implementation)
*/
private async getResourceImpl(server: MCPServer, uri: string): Promise<GetResourceResponse> {
logger.debug(`Getting resource ${uri} from server: ${server.name}`)
getServerLogger(server, { uri }).debug(`Getting resource`)
const client = await this.initClient(server)
try {
const result = await client.readResource({ uri: uri })
@ -793,7 +850,7 @@ class McpService {
contents: contents
}
} catch (error: Error | any) {
logger.error(`Failed to get resource ${uri} from server: ${server.name}`, error.message)
getServerLogger(server, { uri }).error(`Failed to get resource`, error as Error)
throw new Error(`Failed to get resource ${uri} from server: ${server.name}: ${error.message}`)
}
}
@ -838,10 +895,10 @@ class McpService {
if (activeToolCall) {
activeToolCall.abort()
this.activeToolCalls.delete(callId)
logger.debug(`Aborted tool call: ${callId}`)
logger.debug(`Aborted tool call`, { callId })
return true
} else {
logger.warn(`No active tool call found for callId: ${callId}`)
logger.warn(`No active tool call found for callId`, { callId })
return false
}
}
@ -851,22 +908,22 @@ class McpService {
*/
public async getServerVersion(_: Electron.IpcMainInvokeEvent, server: MCPServer): Promise<string | null> {
try {
logger.debug(`Getting server version for: ${server.name}`)
getServerLogger(server).debug(`Getting server version`)
const client = await this.initClient(server)
// Try to get server information which may include version
const serverInfo = client.getServerVersion()
logger.debug(`Server info for ${server.name}:`, serverInfo)
getServerLogger(server).debug(`Server info`, redactSensitive(serverInfo))
if (serverInfo && serverInfo.version) {
logger.debug(`Server version for ${server.name}: ${serverInfo.version}`)
getServerLogger(server).debug(`Server version`, { version: serverInfo.version })
return serverInfo.version
}
logger.warn(`No version information available for server: ${server.name}`)
getServerLogger(server).warn(`No version information available`)
return null
} catch (error: any) {
logger.error(`Failed to get server version for ${server.name}:`, error?.message)
getServerLogger(server).error(`Failed to get server version`, error as Error)
return null
}
}

View File

@ -1,111 +1,40 @@
/**
* Knowledge Service - Manages knowledge bases using RAG (Retrieval-Augmented Generation)
*
* This service handles creation, management, and querying of knowledge bases from various sources
* including files, directories, URLs, sitemaps, and notes.
*
* Features:
* - Concurrent task processing with workload management
* - Multiple data source support
* - Vector database integration
*
* For detailed documentation, see:
* @see {@link ../../../docs/technical/KnowledgeService.md}
*/
import * as fs from 'node:fs'
import path from 'node:path'
import { RAGApplication, RAGApplicationBuilder } from '@cherrystudio/embedjs'
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { LibSqlDb } from '@cherrystudio/embedjs-libsql'
import { SitemapLoader } from '@cherrystudio/embedjs-loader-sitemap'
import { WebLoader } from '@cherrystudio/embedjs-loader-web'
import { loggerService } from '@logger'
import Embeddings from '@main/knowledge/embeddings/Embeddings'
import { addFileLoader } from '@main/knowledge/loader'
import { NoteLoader } from '@main/knowledge/loader/noteLoader'
import PreprocessProvider from '@main/knowledge/preprocess/PreprocessProvider'
import Reranker from '@main/knowledge/reranker/Reranker'
import { fileStorage } from '@main/services/FileStorage'
import { windowService } from '@main/services/WindowService'
import { getDataPath } from '@main/utils'
import Embeddings from '@main/knowledge/embedjs/embeddings/Embeddings'
import { addFileLoader } from '@main/knowledge/embedjs/loader'
import { NoteLoader } from '@main/knowledge/embedjs/loader/noteLoader'
import { preprocessingService } from '@main/knowledge/preprocess/PreprocessingService'
import { getAllFiles } from '@main/utils/file'
import { TraceMethod } from '@mcp-trace/trace-core'
import { MB } from '@shared/config/constant'
import type { LoaderReturn } from '@shared/config/types'
import { LoaderReturn } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import { FileMetadata, KnowledgeBaseParams, KnowledgeItem } from '@types'
import { FileMetadata, KnowledgeBaseParams, KnowledgeSearchResult } from '@types'
import { v4 as uuidv4 } from 'uuid'
import { windowService } from '../WindowService'
import {
IKnowledgeFramework,
KnowledgeBaseAddItemOptionsNonNullableAttribute,
LoaderDoneReturn,
LoaderTask,
LoaderTaskItem,
LoaderTaskItemState
} from './IKnowledgeFramework'
const logger = loggerService.withContext('MainKnowledgeService')
export interface KnowledgeBaseAddItemOptions {
base: KnowledgeBaseParams
item: KnowledgeItem
forceReload?: boolean
userId?: string
}
interface KnowledgeBaseAddItemOptionsNonNullableAttribute {
base: KnowledgeBaseParams
item: KnowledgeItem
forceReload: boolean
userId: string
}
interface EvaluateTaskWorkload {
workload: number
}
type LoaderDoneReturn = LoaderReturn | null
enum LoaderTaskItemState {
PENDING,
PROCESSING,
DONE
}
interface LoaderTaskItem {
state: LoaderTaskItemState
task: () => Promise<unknown>
evaluateTaskWorkload: EvaluateTaskWorkload
}
interface LoaderTask {
loaderTasks: LoaderTaskItem[]
loaderDoneReturn: LoaderDoneReturn
}
interface LoaderTaskOfSet {
loaderTasks: Set<LoaderTaskItem>
loaderDoneReturn: LoaderDoneReturn
}
interface QueueTaskItem {
taskPromise: () => Promise<unknown>
resolve: () => void
evaluateTaskWorkload: EvaluateTaskWorkload
}
const loaderTaskIntoOfSet = (loaderTask: LoaderTask): LoaderTaskOfSet => {
return {
loaderTasks: new Set(loaderTask.loaderTasks),
loaderDoneReturn: loaderTask.loaderDoneReturn
}
}
class KnowledgeService {
private storageDir = path.join(getDataPath(), 'KnowledgeBase')
private pendingDeleteFile = path.join(this.storageDir, 'knowledge_pending_delete.json')
// Byte based
private workload = 0
private processingItemCount = 0
private knowledgeItemProcessingQueueMappingPromise: Map<LoaderTaskOfSet, () => void> = new Map()
export class EmbedJsFramework implements IKnowledgeFramework {
private storageDir: string
private ragApplications: Map<string, RAGApplication> = new Map()
private pendingDeleteFile: string
private dbInstances: Map<string, LibSqlDb> = new Map()
private static MAXIMUM_WORKLOAD = 80 * MB
private static MAXIMUM_PROCESSING_ITEM_COUNT = 30
private static ERROR_LOADER_RETURN: LoaderReturn = {
entriesAdded: 0,
uniqueId: '',
@ -114,7 +43,9 @@ class KnowledgeService {
status: 'failed'
}
constructor() {
constructor(storageDir: string) {
this.storageDir = storageDir
this.pendingDeleteFile = path.join(this.storageDir, 'knowledge_pending_delete.json')
this.initStorageDir()
this.cleanupOnStartup()
}
@ -229,33 +160,28 @@ class KnowledgeService {
logger.info(`Startup cleanup completed: ${deletedCount}/${pendingDeleteIds.length} knowledge bases deleted`)
}
private getRagApplication = async ({
id,
embedApiClient,
dimensions,
documentCount
}: KnowledgeBaseParams): Promise<RAGApplication> => {
if (this.ragApplications.has(id)) {
return this.ragApplications.get(id)!
private async getRagApplication(base: KnowledgeBaseParams): Promise<RAGApplication> {
if (this.ragApplications.has(base.id)) {
return this.ragApplications.get(base.id)!
}
let ragApplication: RAGApplication
const embeddings = new Embeddings({
embedApiClient,
dimensions
embedApiClient: base.embedApiClient,
dimensions: base.dimensions
})
try {
const libSqlDb = new LibSqlDb({ path: path.join(this.storageDir, id) })
const libSqlDb = new LibSqlDb({ path: path.join(this.storageDir, base.id) })
// Save database instance for later closing
this.dbInstances.set(id, libSqlDb)
this.dbInstances.set(base.id, libSqlDb)
ragApplication = await new RAGApplicationBuilder()
.setModel('NO_MODEL')
.setEmbeddingModel(embeddings)
.setVectorDatabase(libSqlDb)
.setSearchResultCount(documentCount || 30)
.setSearchResultCount(base.documentCount || 30)
.build()
this.ragApplications.set(id, ragApplication)
this.ragApplications.set(base.id, ragApplication)
} catch (e) {
logger.error('Failed to create RAGApplication:', e as Error)
throw new Error(`Failed to create RAGApplication: ${e}`)
@ -263,17 +189,14 @@ class KnowledgeService {
return ragApplication
}
public create = async (_: Electron.IpcMainInvokeEvent, base: KnowledgeBaseParams): Promise<void> => {
async initialize(base: KnowledgeBaseParams): Promise<void> {
await this.getRagApplication(base)
}
public reset = async (_: Electron.IpcMainInvokeEvent, base: KnowledgeBaseParams): Promise<void> => {
const ragApplication = await this.getRagApplication(base)
await ragApplication.reset()
async reset(base: KnowledgeBaseParams): Promise<void> {
const ragApp = await this.getRagApplication(base)
await ragApp.reset()
}
public async delete(_: Electron.IpcMainInvokeEvent, id: string): Promise<void> {
async delete(id: string): Promise<void> {
logger.debug(`delete id: ${id}`)
await this.cleanupKnowledgeResources(id)
@ -286,15 +209,41 @@ class KnowledgeService {
this.pendingDeleteManager.add(id)
}
}
private maximumLoad() {
return (
this.processingItemCount >= KnowledgeService.MAXIMUM_PROCESSING_ITEM_COUNT ||
this.workload >= KnowledgeService.MAXIMUM_WORKLOAD
)
getLoaderTask(options: KnowledgeBaseAddItemOptionsNonNullableAttribute): LoaderTask {
const { item } = options
const getRagApplication = () => this.getRagApplication(options.base)
switch (item.type) {
case 'file':
return this.fileTask(getRagApplication, options)
case 'directory':
return this.directoryTask(getRagApplication, options)
case 'url':
return this.urlTask(getRagApplication, options)
case 'sitemap':
return this.sitemapTask(getRagApplication, options)
case 'note':
return this.noteTask(getRagApplication, options)
default:
return {
loaderTasks: [],
loaderDoneReturn: null
}
}
}
async remove(options: { uniqueIds: string[]; base: KnowledgeBaseParams }): Promise<void> {
const ragApp = await this.getRagApplication(options.base)
for (const id of options.uniqueIds) {
await ragApp.deleteLoader(id)
}
}
async search(options: { search: string; base: KnowledgeBaseParams }): Promise<KnowledgeSearchResult[]> {
const ragApp = await this.getRagApplication(options.base)
return await ragApp.search(options.search)
}
private fileTask(
ragApplication: RAGApplication,
getRagApplication: () => Promise<RAGApplication>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, forceReload, userId } = options
@ -307,7 +256,8 @@ class KnowledgeService {
task: async () => {
try {
// Add preprocessing logic
const fileToProcess: FileMetadata = await this.preprocessing(file, base, item, userId)
const ragApplication = await getRagApplication()
const fileToProcess: FileMetadata = await preprocessingService.preprocessFile(file, base, item, userId)
// Use processed file for loading
return addFileLoader(ragApplication, fileToProcess, base, forceReload)
@ -318,7 +268,7 @@ class KnowledgeService {
.catch((e) => {
logger.error(`Error in addFileLoader for ${file.name}: ${e}`)
const errorResult: LoaderReturn = {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'embedding'
}
@ -328,7 +278,7 @@ class KnowledgeService {
} catch (e: any) {
logger.error(`Preprocessing failed for ${file.name}: ${e}`)
const errorResult: LoaderReturn = {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'preprocess'
}
@ -345,7 +295,7 @@ class KnowledgeService {
return loaderTask
}
private directoryTask(
ragApplication: RAGApplication,
getRagApplication: () => Promise<RAGApplication>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, forceReload } = options
@ -372,8 +322,9 @@ class KnowledgeService {
for (const file of files) {
loaderTasks.push({
state: LoaderTaskItemState.PENDING,
task: () =>
addFileLoader(ragApplication, file, base, forceReload)
task: async () => {
const ragApplication = await getRagApplication()
return addFileLoader(ragApplication, file, base, forceReload)
.then((result) => {
loaderDoneReturn.entriesAdded += 1
processedFiles += 1
@ -384,11 +335,12 @@ class KnowledgeService {
.catch((err) => {
logger.error('Failed to add dir loader:', err)
return {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: `Failed to add dir loader: ${err.message}`,
messageSource: 'embedding'
}
}),
})
},
evaluateTaskWorkload: { workload: file.size }
})
}
@ -400,7 +352,7 @@ class KnowledgeService {
}
private urlTask(
ragApplication: RAGApplication,
getRagApplication: () => Promise<RAGApplication>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, forceReload } = options
@ -410,7 +362,8 @@ class KnowledgeService {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: () => {
task: async () => {
const ragApplication = await getRagApplication()
const loaderReturn = ragApplication.addLoader(
new WebLoader({
urlOrContent: content,
@ -434,7 +387,7 @@ class KnowledgeService {
.catch((err) => {
logger.error('Failed to add url loader:', err)
return {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: `Failed to add url loader: ${err.message}`,
messageSource: 'embedding'
}
@ -449,7 +402,7 @@ class KnowledgeService {
}
private sitemapTask(
ragApplication: RAGApplication,
getRagApplication: () => Promise<RAGApplication>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, forceReload } = options
@ -459,8 +412,9 @@ class KnowledgeService {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: () =>
ragApplication
task: async () => {
const ragApplication = await getRagApplication()
return ragApplication
.addLoader(
new SitemapLoader({ url: content, chunkSize: base.chunkSize, chunkOverlap: base.chunkOverlap }) as any,
forceReload
@ -478,11 +432,12 @@ class KnowledgeService {
.catch((err) => {
logger.error('Failed to add sitemap loader:', err)
return {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: `Failed to add sitemap loader: ${err.message}`,
messageSource: 'embedding'
}
}),
})
},
evaluateTaskWorkload: { workload: 20 * MB }
}
],
@ -492,7 +447,7 @@ class KnowledgeService {
}
private noteTask(
ragApplication: RAGApplication,
getRagApplication: () => Promise<RAGApplication>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, forceReload } = options
@ -505,7 +460,8 @@ class KnowledgeService {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: () => {
task: async () => {
const ragApplication = await getRagApplication()
const loaderReturn = ragApplication.addLoader(
new NoteLoader({
text: content,
@ -528,7 +484,7 @@ class KnowledgeService {
.catch((err) => {
logger.error('Failed to add note loader:', err)
return {
...KnowledgeService.ERROR_LOADER_RETURN,
...EmbedJsFramework.ERROR_LOADER_RETURN,
message: `Failed to add note loader: ${err.message}`,
messageSource: 'embedding'
}
@ -541,199 +497,4 @@ class KnowledgeService {
}
return loaderTask
}
private processingQueueHandle() {
const getSubtasksUntilMaximumLoad = (): QueueTaskItem[] => {
const queueTaskList: QueueTaskItem[] = []
that: for (const [task, resolve] of this.knowledgeItemProcessingQueueMappingPromise) {
for (const item of task.loaderTasks) {
if (this.maximumLoad()) {
break that
}
const { state, task: taskPromise, evaluateTaskWorkload } = item
if (state !== LoaderTaskItemState.PENDING) {
continue
}
const { workload } = evaluateTaskWorkload
this.workload += workload
this.processingItemCount += 1
item.state = LoaderTaskItemState.PROCESSING
queueTaskList.push({
taskPromise: () =>
taskPromise().then(() => {
this.workload -= workload
this.processingItemCount -= 1
task.loaderTasks.delete(item)
if (task.loaderTasks.size === 0) {
this.knowledgeItemProcessingQueueMappingPromise.delete(task)
resolve()
}
this.processingQueueHandle()
}),
resolve: () => {},
evaluateTaskWorkload
})
}
}
return queueTaskList
}
const subTasks = getSubtasksUntilMaximumLoad()
if (subTasks.length > 0) {
const subTaskPromises = subTasks.map(({ taskPromise }) => taskPromise())
Promise.all(subTaskPromises).then(() => {
subTasks.forEach(({ resolve }) => resolve())
})
}
}
private appendProcessingQueue(task: LoaderTask): Promise<LoaderReturn> {
return new Promise((resolve) => {
this.knowledgeItemProcessingQueueMappingPromise.set(loaderTaskIntoOfSet(task), () => {
resolve(task.loaderDoneReturn!)
})
})
}
public add = (_: Electron.IpcMainInvokeEvent, options: KnowledgeBaseAddItemOptions): Promise<LoaderReturn> => {
return new Promise((resolve) => {
const { base, item, forceReload = false, userId = '' } = options
const optionsNonNullableAttribute = { base, item, forceReload, userId }
this.getRagApplication(base)
.then((ragApplication) => {
const task = (() => {
switch (item.type) {
case 'file':
return this.fileTask(ragApplication, optionsNonNullableAttribute)
case 'directory':
return this.directoryTask(ragApplication, optionsNonNullableAttribute)
case 'url':
return this.urlTask(ragApplication, optionsNonNullableAttribute)
case 'sitemap':
return this.sitemapTask(ragApplication, optionsNonNullableAttribute)
case 'note':
return this.noteTask(ragApplication, optionsNonNullableAttribute)
default:
return null
}
})()
if (task) {
this.appendProcessingQueue(task).then(() => {
resolve(task.loaderDoneReturn!)
})
this.processingQueueHandle()
} else {
resolve({
...KnowledgeService.ERROR_LOADER_RETURN,
message: 'Unsupported item type',
messageSource: 'embedding'
})
}
})
.catch((err) => {
logger.error('Failed to add item:', err)
resolve({
...KnowledgeService.ERROR_LOADER_RETURN,
message: `Failed to add item: ${err.message}`,
messageSource: 'embedding'
})
})
})
}
@TraceMethod({ spanName: 'remove', tag: 'Knowledge' })
public async remove(
_: Electron.IpcMainInvokeEvent,
{ uniqueId, uniqueIds, base }: { uniqueId: string; uniqueIds: string[]; base: KnowledgeBaseParams }
): Promise<void> {
const ragApplication = await this.getRagApplication(base)
logger.debug(`Remove Item UniqueId: ${uniqueId}`)
for (const id of uniqueIds) {
await ragApplication.deleteLoader(id)
}
}
@TraceMethod({ spanName: 'RagSearch', tag: 'Knowledge' })
public async search(
_: Electron.IpcMainInvokeEvent,
{ search, base }: { search: string; base: KnowledgeBaseParams }
): Promise<ExtractChunkData[]> {
const ragApplication = await this.getRagApplication(base)
return await ragApplication.search(search)
}
@TraceMethod({ spanName: 'rerank', tag: 'Knowledge' })
public async rerank(
_: Electron.IpcMainInvokeEvent,
{ search, base, results }: { search: string; base: KnowledgeBaseParams; results: ExtractChunkData[] }
): Promise<ExtractChunkData[]> {
if (results.length === 0) {
return results
}
return await new Reranker(base).rerank(search, results)
}
public getStorageDir = (): string => {
return this.storageDir
}
private preprocessing = async (
file: FileMetadata,
base: KnowledgeBaseParams,
item: KnowledgeItem,
userId: string
): Promise<FileMetadata> => {
let fileToProcess: FileMetadata = file
if (base.preprocessProvider && file.ext.toLowerCase() === '.pdf') {
try {
const provider = new PreprocessProvider(base.preprocessProvider.provider, userId)
const filePath = fileStorage.getFilePathById(file)
// Check if file has already been preprocessed
const alreadyProcessed = await provider.checkIfAlreadyProcessed(file)
if (alreadyProcessed) {
logger.debug(`File already preprocessed, using cached result: ${filePath}`)
return alreadyProcessed
}
// Execute preprocessing
logger.debug(`Starting preprocessing for scanned PDF: ${filePath}`)
const { processedFile, quota } = await provider.parseFile(item.id, file)
fileToProcess = processedFile
const mainWindow = windowService.getMainWindow()
mainWindow?.webContents.send('file-preprocess-finished', {
itemId: item.id,
quota: quota
})
} catch (err) {
logger.error(`Preprocessing failed: ${err}`)
// If preprocessing fails, use original file
// fileToProcess = file
throw new Error(`Preprocessing failed: ${err}`)
}
}
return fileToProcess
}
public checkQuota = async (
_: Electron.IpcMainInvokeEvent,
base: KnowledgeBaseParams,
userId: string
): Promise<number> => {
try {
if (base.preprocessProvider && base.preprocessProvider.type === 'preprocess') {
const provider = new PreprocessProvider(base.preprocessProvider.provider, userId)
return await provider.checkQuota()
}
throw new Error('No preprocess provider configured')
} catch (err) {
logger.error(`Failed to check quota: ${err}`)
throw new Error(`Failed to check quota: ${err}`)
}
}
}
export default new KnowledgeService()

View File

@ -0,0 +1,72 @@
import { LoaderReturn } from '@shared/config/types'
import { KnowledgeBaseParams, KnowledgeItem, KnowledgeSearchResult } from '@types'
export interface KnowledgeBaseAddItemOptions {
base: KnowledgeBaseParams
item: KnowledgeItem
forceReload?: boolean
userId?: string
}
export interface KnowledgeBaseAddItemOptionsNonNullableAttribute {
base: KnowledgeBaseParams
item: KnowledgeItem
forceReload: boolean
userId: string
}
export interface EvaluateTaskWorkload {
workload: number
}
export type LoaderDoneReturn = LoaderReturn | null
export enum LoaderTaskItemState {
PENDING,
PROCESSING,
DONE
}
export interface LoaderTaskItem {
state: LoaderTaskItemState
task: () => Promise<unknown>
evaluateTaskWorkload: EvaluateTaskWorkload
}
export interface LoaderTask {
loaderTasks: LoaderTaskItem[]
loaderDoneReturn: LoaderDoneReturn
}
export interface LoaderTaskOfSet {
loaderTasks: Set<LoaderTaskItem>
loaderDoneReturn: LoaderDoneReturn
}
export interface QueueTaskItem {
taskPromise: () => Promise<unknown>
resolve: () => void
evaluateTaskWorkload: EvaluateTaskWorkload
}
export const loaderTaskIntoOfSet = (loaderTask: LoaderTask): LoaderTaskOfSet => {
return {
loaderTasks: new Set(loaderTask.loaderTasks),
loaderDoneReturn: loaderTask.loaderDoneReturn
}
}
export interface IKnowledgeFramework {
/** Initialize framework resources for the given knowledge base */
initialize(base: KnowledgeBaseParams): Promise<void>
/** Reset the knowledge base, deleting all of its content */
reset(base: KnowledgeBaseParams): Promise<void>
/** Delete the resources associated with a knowledge base, including its files */
delete(id: string): Promise<void>
/** Build the loader task object for adding an item; it is processed by the queue */
getLoaderTask(options: KnowledgeBaseAddItemOptionsNonNullableAttribute): LoaderTask
/** Remove specific items from the knowledge base */
remove(options: { uniqueIds: string[]; base: KnowledgeBaseParams }): Promise<void>
/** Search the knowledge base */
search(options: { search: string; base: KnowledgeBaseParams }): Promise<KnowledgeSearchResult[]>
}
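For illustration, a minimal implementation of this interface could look like the following sketch; the class name and no-op bodies are hypothetical and not part of this commit.

// Hypothetical no-op framework, for illustration only
class NoopFramework implements IKnowledgeFramework {
  async initialize(_base: KnowledgeBaseParams): Promise<void> {}
  async reset(_base: KnowledgeBaseParams): Promise<void> {}
  async delete(_id: string): Promise<void> {}
  getLoaderTask(_options: KnowledgeBaseAddItemOptionsNonNullableAttribute): LoaderTask {
    // Nothing to load: an empty task list resolves immediately in the processing queue
    return { loaderTasks: [], loaderDoneReturn: null }
  }
  async remove(_options: { uniqueIds: string[]; base: KnowledgeBaseParams }): Promise<void> {}
  async search(_options: { search: string; base: KnowledgeBaseParams }): Promise<KnowledgeSearchResult[]> {
    return []
  }
}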

View File

@ -0,0 +1,48 @@
import path from 'node:path'
import { KnowledgeBaseParams } from '@types'
import { app } from 'electron'
import { EmbedJsFramework } from './EmbedJsFramework'
import { IKnowledgeFramework } from './IKnowledgeFramework'
import { LangChainFramework } from './LangChainFramework'
class KnowledgeFrameworkFactory {
private static instance: KnowledgeFrameworkFactory
private frameworks: Map<string, IKnowledgeFramework> = new Map()
private storageDir: string
private constructor(storageDir: string) {
this.storageDir = storageDir
}
public static getInstance(storageDir: string): KnowledgeFrameworkFactory {
if (!KnowledgeFrameworkFactory.instance) {
KnowledgeFrameworkFactory.instance = new KnowledgeFrameworkFactory(storageDir)
}
return KnowledgeFrameworkFactory.instance
}
public getFramework(base: KnowledgeBaseParams): IKnowledgeFramework {
const frameworkType = base.framework || 'embedjs' // default to embedjs if not specified
if (this.frameworks.has(frameworkType)) {
return this.frameworks.get(frameworkType)!
}
let framework: IKnowledgeFramework
switch (frameworkType) {
case 'langchain':
framework = new LangChainFramework(this.storageDir)
break
case 'embedjs':
default:
framework = new EmbedJsFramework(this.storageDir)
break
}
this.frameworks.set(frameworkType, framework)
return framework
}
}
export const knowledgeFrameworkFactory = KnowledgeFrameworkFactory.getInstance(
path.join(app.getPath('userData'), 'Data', 'KnowledgeBase')
)
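As a usage sketch (the helper below is hypothetical; the real dispatch lives in KnowledgeService), a caller resolves the framework for a base and delegates to it:

// Hypothetical call site, for illustration only
async function ensureKnowledgeBase(base: KnowledgeBaseParams): Promise<void> {
  // Falls back to the 'embedjs' framework when base.framework is unset
  const framework = knowledgeFrameworkFactory.getFramework(base)
  await framework.initialize(base)
}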

View File

@ -0,0 +1,190 @@
import * as fs from 'node:fs'
import path from 'node:path'
import { loggerService } from '@logger'
import { preprocessingService } from '@main/knowledge/preprocess/PreprocessingService'
import Reranker from '@main/knowledge/reranker/Reranker'
import { TraceMethod } from '@mcp-trace/trace-core'
import { MB } from '@shared/config/constant'
import { LoaderReturn } from '@shared/config/types'
import { KnowledgeBaseParams, KnowledgeSearchResult } from '@types'
import { app } from 'electron'
import {
KnowledgeBaseAddItemOptions,
LoaderTask,
loaderTaskIntoOfSet,
LoaderTaskItemState,
LoaderTaskOfSet,
QueueTaskItem
} from './IKnowledgeFramework'
import { knowledgeFrameworkFactory } from './KnowledgeFrameworkFactory'
const logger = loggerService.withContext('MainKnowledgeService')
class KnowledgeService {
private storageDir = path.join(app.getPath('userData'), 'Data', 'KnowledgeBase')
private workload = 0
private processingItemCount = 0
private knowledgeItemProcessingQueueMappingPromise: Map<LoaderTaskOfSet, () => void> = new Map()
private static MAXIMUM_WORKLOAD = 80 * MB
private static MAXIMUM_PROCESSING_ITEM_COUNT = 30
private static ERROR_LOADER_RETURN: LoaderReturn = {
entriesAdded: 0,
uniqueId: '',
uniqueIds: [''],
loaderType: '',
status: 'failed'
}
constructor() {
this.initStorageDir()
}
private initStorageDir = (): void => {
if (!fs.existsSync(this.storageDir)) {
fs.mkdirSync(this.storageDir, { recursive: true })
}
}
private maximumLoad() {
return (
this.processingItemCount >= KnowledgeService.MAXIMUM_PROCESSING_ITEM_COUNT ||
this.workload >= KnowledgeService.MAXIMUM_WORKLOAD
)
}
private processingQueueHandle() {
const getSubtasksUntilMaximumLoad = (): QueueTaskItem[] => {
const queueTaskList: QueueTaskItem[] = []
that: for (const [task, resolve] of this.knowledgeItemProcessingQueueMappingPromise) {
for (const item of task.loaderTasks) {
if (this.maximumLoad()) {
break that
}
const { state, task: taskPromise, evaluateTaskWorkload } = item
if (state !== LoaderTaskItemState.PENDING) {
continue
}
const { workload } = evaluateTaskWorkload
this.workload += workload
this.processingItemCount += 1
item.state = LoaderTaskItemState.PROCESSING
queueTaskList.push({
taskPromise: () =>
taskPromise().then(() => {
this.workload -= workload
this.processingItemCount -= 1
task.loaderTasks.delete(item)
if (task.loaderTasks.size === 0) {
this.knowledgeItemProcessingQueueMappingPromise.delete(task)
resolve()
}
this.processingQueueHandle()
}),
resolve: () => {},
evaluateTaskWorkload
})
}
}
return queueTaskList
}
const subTasks = getSubtasksUntilMaximumLoad()
if (subTasks.length > 0) {
const subTaskPromises = subTasks.map(({ taskPromise }) => taskPromise())
Promise.all(subTaskPromises).then(() => {
subTasks.forEach(({ resolve }) => resolve())
})
}
}
private appendProcessingQueue(task: LoaderTask): Promise<LoaderReturn> {
return new Promise((resolve) => {
this.knowledgeItemProcessingQueueMappingPromise.set(loaderTaskIntoOfSet(task), () => {
resolve(task.loaderDoneReturn!)
})
})
}
public async create(_: Electron.IpcMainInvokeEvent, base: KnowledgeBaseParams): Promise<void> {
logger.info(`Creating knowledge base: ${JSON.stringify(base)}`)
const framework = knowledgeFrameworkFactory.getFramework(base)
await framework.initialize(base)
}
public async reset(_: Electron.IpcMainInvokeEvent, { base }: { base: KnowledgeBaseParams }): Promise<void> {
const framework = knowledgeFrameworkFactory.getFramework(base)
await framework.reset(base)
}
public async delete(_: Electron.IpcMainInvokeEvent, base: KnowledgeBaseParams, id: string): Promise<void> {
logger.info(`Deleting knowledge base: ${JSON.stringify(base)}`)
const framework = knowledgeFrameworkFactory.getFramework(base)
await framework.delete(id)
}
public add = async (_: Electron.IpcMainInvokeEvent, options: KnowledgeBaseAddItemOptions): Promise<LoaderReturn> => {
logger.info(`Adding item to knowledge base: ${JSON.stringify(options)}`)
return new Promise((resolve) => {
const { base, item, forceReload = false, userId = '' } = options
const framework = knowledgeFrameworkFactory.getFramework(base)
const task = framework.getLoaderTask({ base, item, forceReload, userId })
if (task) {
this.appendProcessingQueue(task).then(() => {
resolve(task.loaderDoneReturn!)
})
this.processingQueueHandle()
} else {
resolve({
...KnowledgeService.ERROR_LOADER_RETURN,
message: 'Unsupported item type',
messageSource: 'embedding'
})
}
})
}
public async remove(
_: Electron.IpcMainInvokeEvent,
{ uniqueIds, base }: { uniqueIds: string[]; base: KnowledgeBaseParams }
): Promise<void> {
logger.info(`Removing items from knowledge base: ${JSON.stringify({ uniqueIds, base })}`)
const framework = knowledgeFrameworkFactory.getFramework(base)
await framework.remove({ uniqueIds, base })
}
public async search(
_: Electron.IpcMainInvokeEvent,
{ search, base }: { search: string; base: KnowledgeBaseParams }
): Promise<KnowledgeSearchResult[]> {
logger.info(`Searching knowledge base: ${JSON.stringify({ search, base })}`)
const framework = knowledgeFrameworkFactory.getFramework(base)
return framework.search({ search, base })
}
@TraceMethod({ spanName: 'rerank', tag: 'Knowledge' })
public async rerank(
_: Electron.IpcMainInvokeEvent,
{ search, base, results }: { search: string; base: KnowledgeBaseParams; results: KnowledgeSearchResult[] }
): Promise<KnowledgeSearchResult[]> {
logger.info(`Reranking knowledge base: ${JSON.stringify({ search, base, results })}`)
if (results.length === 0) {
return results
}
return await new Reranker(base).rerank(search, results)
}
public getStorageDir = (): string => {
return this.storageDir
}
public async checkQuota(_: Electron.IpcMainInvokeEvent, base: KnowledgeBaseParams, userId: string): Promise<number> {
return preprocessingService.checkQuota(base, userId)
}
}
export default new KnowledgeService()

View File

@ -0,0 +1,555 @@
import * as fs from 'node:fs'
import path from 'node:path'
import { FaissStore } from '@langchain/community/vectorstores/faiss'
import type { Document } from '@langchain/core/documents'
import { loggerService } from '@logger'
import TextEmbeddings from '@main/knowledge/langchain/embeddings/TextEmbeddings'
import {
addFileLoader,
addNoteLoader,
addSitemapLoader,
addVideoLoader,
addWebLoader
} from '@main/knowledge/langchain/loader'
import { RetrieverFactory } from '@main/knowledge/langchain/retriever'
import { preprocessingService } from '@main/knowledge/preprocess/PreprocessingService'
import { getAllFiles } from '@main/utils/file'
import { getUrlSource } from '@main/utils/knowledge'
import { MB } from '@shared/config/constant'
import { LoaderReturn } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import {
FileMetadata,
isKnowledgeDirectoryItem,
isKnowledgeFileItem,
isKnowledgeNoteItem,
isKnowledgeSitemapItem,
isKnowledgeUrlItem,
isKnowledgeVideoItem,
KnowledgeBaseParams,
KnowledgeSearchResult
} from '@types'
import { v4 as uuidv4 } from 'uuid'
import { windowService } from '../WindowService'
import {
IKnowledgeFramework,
KnowledgeBaseAddItemOptionsNonNullableAttribute,
LoaderDoneReturn,
LoaderTask,
LoaderTaskItem,
LoaderTaskItemState
} from './IKnowledgeFramework'
const logger = loggerService.withContext('LangChainFramework')
export class LangChainFramework implements IKnowledgeFramework {
private storageDir: string
private static ERROR_LOADER_RETURN: LoaderReturn = {
entriesAdded: 0,
uniqueId: '',
uniqueIds: [''],
loaderType: '',
status: 'failed'
}
constructor(storageDir: string) {
this.storageDir = storageDir
this.initStorageDir()
}
private initStorageDir = (): void => {
if (!fs.existsSync(this.storageDir)) {
fs.mkdirSync(this.storageDir, { recursive: true })
}
}
private async createDatabase(base: KnowledgeBaseParams): Promise<void> {
const dbPath = path.join(this.storageDir, base.id)
const embeddings = this.getEmbeddings(base)
const vectorStore = new FaissStore(embeddings, {})
const mockDocument: Document = {
pageContent: 'Create Database Document',
metadata: {}
}
await vectorStore.addDocuments([mockDocument], { ids: ['1'] })
await vectorStore.save(dbPath)
await vectorStore.delete({ ids: ['1'] })
await vectorStore.save(dbPath)
}
private getEmbeddings(base: KnowledgeBaseParams): TextEmbeddings {
return new TextEmbeddings({
embedApiClient: base.embedApiClient,
dimensions: base.dimensions
})
}
private async getVectorStore(base: KnowledgeBaseParams): Promise<FaissStore> {
const embeddings = this.getEmbeddings(base)
const vectorStore = await FaissStore.load(path.join(this.storageDir, base.id), embeddings)
return vectorStore
}
async initialize(base: KnowledgeBaseParams): Promise<void> {
await this.createDatabase(base)
}
async reset(base: KnowledgeBaseParams): Promise<void> {
const dbPath = path.join(this.storageDir, base.id)
if (fs.existsSync(dbPath)) {
fs.rmSync(dbPath, { recursive: true })
}
}
async delete(id: string): Promise<void> {
const dbPath = path.join(this.storageDir, id)
if (fs.existsSync(dbPath)) {
fs.rmSync(dbPath, { recursive: true })
}
}
getLoaderTask(options: KnowledgeBaseAddItemOptionsNonNullableAttribute): LoaderTask {
const { item } = options
const getStore = () => this.getVectorStore(options.base)
switch (item.type) {
case 'file':
return this.fileTask(getStore, options)
case 'directory':
return this.directoryTask(getStore, options)
case 'url':
return this.urlTask(getStore, options)
case 'sitemap':
return this.sitemapTask(getStore, options)
case 'note':
return this.noteTask(getStore, options)
case 'video':
return this.videoTask(getStore, options)
default:
return {
loaderTasks: [],
loaderDoneReturn: null
}
}
}
async remove(options: { uniqueIds: string[]; base: KnowledgeBaseParams }): Promise<void> {
const { uniqueIds, base } = options
const vectorStore = await this.getVectorStore(base)
logger.info(`Removing items with uniqueIds: ${uniqueIds}`)
await vectorStore.delete({ ids: uniqueIds })
await vectorStore.save(path.join(this.storageDir, base.id))
}
async search(options: { search: string; base: KnowledgeBaseParams }): Promise<KnowledgeSearchResult[]> {
const { search, base } = options
logger.info(`search base: ${JSON.stringify(base)}`)
try {
const vectorStore = await this.getVectorStore(base)
// For bm25 or hybrid modes, fetch all documents from the database
const documents: Document[] = await this.getAllDocuments(base)
if (documents.length === 0) return []
const retrieverFactory = new RetrieverFactory()
const retriever = retrieverFactory.createRetriever(base, vectorStore, documents)
const results = await retriever.invoke(search)
logger.info(`Search Results: ${JSON.stringify(results)}`)
// VectorStoreRetriever and EnsembleRetriever attach scores to metadata.score
// BM25Retriever does not return scores by default, so handle that case
return results.map((item) => {
return {
pageContent: item.pageContent,
metadata: item.metadata,
// If metadata has no score, provide a default value
score: typeof item.metadata.score === 'number' ? item.metadata.score : 0
}
})
} catch (error: any) {
logger.error(`Error during search in knowledge base ${base.id}: ${error.message}`)
return []
}
}
private fileTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item, userId } = options
if (!isKnowledgeFileItem(item)) {
logger.error(`Invalid item type for fileTask: expected 'file', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'file', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const file = item.content
const loaderTask: LoaderTask = {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: async () => {
try {
const vectorStore = await getVectorStore()
// Add preprocessing logic
const fileToProcess: FileMetadata = await preprocessingService.preprocessFile(file, base, item, userId)
// Use the processed file for loading
return addFileLoader(base, vectorStore, fileToProcess)
.then((result) => {
loaderTask.loaderDoneReturn = result
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((e) => {
logger.error(`Error in addFileLoader for ${file.name}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'embedding'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
})
} catch (e: any) {
logger.error(`Preprocessing failed for ${file.name}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'preprocess'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
}
},
evaluateTaskWorkload: { workload: file.size }
}
],
loaderDoneReturn: null
}
return loaderTask
}
private directoryTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item } = options
if (!isKnowledgeDirectoryItem(item)) {
logger.error(`Invalid item type for directoryTask: expected 'directory', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'directory', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const directory = item.content
const files = getAllFiles(directory)
const totalFiles = files.length
let processedFiles = 0
const sendDirectoryProcessingPercent = (totalFiles: number, processedFiles: number) => {
const mainWindow = windowService.getMainWindow()
mainWindow?.webContents.send(IpcChannel.DirectoryProcessingPercent, {
itemId: item.id,
percent: (processedFiles / totalFiles) * 100
})
}
const loaderDoneReturn: LoaderDoneReturn = {
entriesAdded: 0,
uniqueId: `DirectoryLoader_${uuidv4()}`,
uniqueIds: [],
loaderType: 'DirectoryLoader'
}
const loaderTasks: LoaderTaskItem[] = []
for (const file of files) {
loaderTasks.push({
state: LoaderTaskItemState.PENDING,
task: async () => {
const vectorStore = await getVectorStore()
return addFileLoader(base, vectorStore, file)
.then((result) => {
loaderDoneReturn.entriesAdded += 1
processedFiles += 1
sendDirectoryProcessingPercent(totalFiles, processedFiles)
loaderDoneReturn.uniqueIds.push(result.uniqueId)
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((err) => {
logger.error(err)
return {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Failed to add dir loader: ${err.message}`,
messageSource: 'embedding'
}
})
},
evaluateTaskWorkload: { workload: file.size }
})
}
return {
loaderTasks,
loaderDoneReturn
}
}
private urlTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item } = options
if (!isKnowledgeUrlItem(item)) {
logger.error(`Invalid item type for urlTask: expected 'url', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'url', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const url = item.content
const loaderTask: LoaderTask = {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: async () => {
// Use the fetched web page for loading
const vectorStore = await getVectorStore()
return addWebLoader(base, vectorStore, url, getUrlSource(url))
.then((result) => {
loaderTask.loaderDoneReturn = result
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((e) => {
logger.error(`Error in addWebLoader for ${url}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'embedding'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
})
},
evaluateTaskWorkload: { workload: 2 * MB }
}
],
loaderDoneReturn: null
}
return loaderTask
}
private sitemapTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item } = options
if (!isKnowledgeSitemapItem(item)) {
logger.error(`Invalid item type for sitemapTask: expected 'sitemap', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'sitemap', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const url = item.content
const loaderTask: LoaderTask = {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: async () => {
// Use the fetched pages for loading
const vectorStore = await getVectorStore()
return addSitemapLoader(base, vectorStore, url)
.then((result) => {
loaderTask.loaderDoneReturn = result
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((e) => {
logger.error(`Error in addSitemapLoader for ${url}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'embedding'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
})
},
evaluateTaskWorkload: { workload: 2 * MB }
}
],
loaderDoneReturn: null
}
return loaderTask
}
private noteTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item } = options
if (!isKnowledgeNoteItem(item)) {
logger.error(`Invalid item type for noteTask: expected 'note', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'note', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const content = item.content
const sourceUrl = item.sourceUrl ?? ''
logger.info(`noteTask ${content}, ${sourceUrl}`)
const encoder = new TextEncoder()
const contentBytes = encoder.encode(content)
const loaderTask: LoaderTask = {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: async () => {
// Use the processed note for loading
const vectorStore = await getVectorStore()
return addNoteLoader(base, vectorStore, content, sourceUrl)
.then((result) => {
loaderTask.loaderDoneReturn = result
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((e) => {
logger.error(`Error in addNoteLoader for ${sourceUrl}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'embedding'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
})
},
evaluateTaskWorkload: { workload: contentBytes.length }
}
],
loaderDoneReturn: null
}
return loaderTask
}
private videoTask(
getVectorStore: () => Promise<FaissStore>,
options: KnowledgeBaseAddItemOptionsNonNullableAttribute
): LoaderTask {
const { base, item } = options
if (!isKnowledgeVideoItem(item)) {
logger.error(`Invalid item type for videoTask: expected 'video', got '${item.type}'`)
return {
loaderTasks: [],
loaderDoneReturn: {
...LangChainFramework.ERROR_LOADER_RETURN,
message: `Invalid item type: expected 'video', got '${item.type}'`,
messageSource: 'validation'
}
}
}
const files = item.content
const loaderTask: LoaderTask = {
loaderTasks: [
{
state: LoaderTaskItemState.PENDING,
task: async () => {
const vectorStore = await getVectorStore()
return addVideoLoader(base, vectorStore, files)
.then((result) => {
loaderTask.loaderDoneReturn = result
return result
})
.then(async () => {
await vectorStore.save(path.join(this.storageDir, base.id))
})
.catch((e) => {
logger.error(`Preprocessing failed for ${files[0].name}: ${e}`)
const errorResult: LoaderReturn = {
...LangChainFramework.ERROR_LOADER_RETURN,
message: e.message,
messageSource: 'preprocess'
}
loaderTask.loaderDoneReturn = errorResult
return errorResult
})
},
evaluateTaskWorkload: { workload: files[0].size }
}
],
loaderDoneReturn: null
}
return loaderTask
}
private async getAllDocuments(base: KnowledgeBaseParams): Promise<Document[]> {
logger.info(`Fetching all documents from database for knowledge base: ${base.id}`)
try {
const results = (await this.getVectorStore(base)).docstore._docs
const documents: Document[] = Array.from(results.values())
logger.info(`Fetched ${documents.length} documents for BM25/Hybrid retriever.`)
return documents
} catch (e) {
logger.error(`Could not fetch documents from database for base ${base.id}: ${e}`)
// Return an empty array if the table does not exist or the query fails
return []
}
}
}

View File

@ -1,6 +1,6 @@
import { Client, createClient } from '@libsql/client'
import { loggerService } from '@logger'
import Embeddings from '@main/knowledge/embeddings/Embeddings'
import Embeddings from '@main/knowledge/embedjs/embeddings/Embeddings'
import type {
AddMemoryOptions,
AssistantMessage,

View File

@ -2,6 +2,7 @@ import { loggerService } from '@logger'
import { isLinux } from '@main/constant'
import { BuiltinOcrProviderIds, OcrHandler, OcrProvider, OcrResult, SupportedOcrFile } from '@types'
import { ppocrService } from './builtin/PpocrService'
import { systemOcrService } from './builtin/SystemOcrService'
import { tesseractService } from './builtin/TesseractService'
@ -36,3 +37,5 @@ export const ocrService = new OcrService()
ocrService.register(BuiltinOcrProviderIds.tesseract, tesseractService.ocr.bind(tesseractService))
!isLinux && ocrService.register(BuiltinOcrProviderIds.system, systemOcrService.ocr.bind(systemOcrService))
ocrService.register(BuiltinOcrProviderIds.paddleocr, ppocrService.ocr.bind(ppocrService))

View File

@ -0,0 +1,100 @@
import { loadOcrImage } from '@main/utils/ocr'
import { ImageFileMetadata, isImageFileMetadata, OcrPpocrConfig, OcrResult, SupportedOcrFile } from '@types'
import { net } from 'electron'
import { z } from 'zod'
import { OcrBaseService } from './OcrBaseService'
enum FileType {
PDF = 0,
Image = 1
}
// API Reference: https://www.paddleocr.ai/latest/version3.x/pipeline_usage/OCR.html#3
interface OcrPayload {
file: string
fileType?: FileType | null
useDocOrientationClassify?: boolean | null
useDocUnwarping?: boolean | null
useTextlineOrientation?: boolean | null
textDetLimitSideLen?: number | null
textDetLimitType?: string | null
textDetThresh?: number | null
textDetBoxThresh?: number | null
textDetUnclipRatio?: number | null
textRecScoreThresh?: number | null
visualize?: boolean | null
}
const OcrResponseSchema = z.object({
result: z.object({
ocrResults: z.array(
z.object({
prunedResult: z.object({
rec_texts: z.array(z.string())
})
})
)
})
})
export class PpocrService extends OcrBaseService {
public ocr = async (file: SupportedOcrFile, options?: OcrPpocrConfig): Promise<OcrResult> => {
if (!isImageFileMetadata(file)) {
throw new Error('Only image files are supported currently')
}
if (!options) {
throw new Error('config is required')
}
return this.imageOcr(file, options)
}
private async imageOcr(file: ImageFileMetadata, options: OcrPpocrConfig): Promise<OcrResult> {
if (!options.apiUrl) {
throw new Error('API URL is required')
}
const apiUrl = options.apiUrl
const buffer = await loadOcrImage(file)
const base64 = buffer.toString('base64')
const payload = {
file: base64,
fileType: FileType.Image,
useDocOrientationClassify: false,
useDocUnwarping: false,
visualize: false
} satisfies OcrPayload
const headers: Record<string, string> = {
'Content-Type': 'application/json'
}
if (options.accessToken) {
headers['Authorization'] = `token ${options.accessToken}`
}
try {
const response = await net.fetch(apiUrl, {
method: 'POST',
headers,
body: JSON.stringify(payload)
})
if (!response.ok) {
const text = await response.text()
throw new Error(`OCR service error: ${response.status} ${response.statusText} - ${text}`)
}
const data = await response.json()
const validatedResponse = OcrResponseSchema.parse(data)
const recTexts = validatedResponse.result.ocrResults[0].prunedResult.rec_texts
return { text: recTexts.join('\n') }
} catch (error: any) {
throw new Error(`OCR service error: ${error.message}`)
}
}
}
export const ppocrService = new PpocrService()

View File

@ -205,6 +205,19 @@ export async function readTextFileWithAutoEncoding(filePath: string): Promise<st
return iconv.decode(data, 'UTF-8')
}
export async function base64Image(file: FileMetadata): Promise<{ mime: string; base64: string; data: string }> {
const filePath = path.join(getFilesDir(), `${file.id}${file.ext}`)
const data = await fs.promises.readFile(filePath)
const base64 = data.toString('base64')
const rawExt = path.extname(filePath).slice(1)
const ext = rawExt === 'jpg' ? 'jpeg' : rawExt
const mime = `image/${ext}`
return {
mime,
base64,
data: `data:${mime};base64,${base64}`
}
}
/**
*
* @param dirPath

View File

@ -0,0 +1,13 @@
export const DEFAULT_DOCUMENT_COUNT = 6
export const DEFAULT_RELEVANT_SCORE = 0
export type UrlSource = 'normal' | 'github' | 'youtube'
const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.be|youtube\.be|yt\.be)/i
export function getUrlSource(url: string): UrlSource {
if (youtubeRegex.test(url)) {
return 'youtube'
} else {
return 'normal'
}
}
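For example (illustrative URLs only), the helper classifies a link before a loader is chosen:

// Illustrative usage, not part of this commit
getUrlSource('https://youtu.be/dQw4w9WgXcQ') // => 'youtube'
getUrlSource('https://example.com/guide') // => 'normal'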

View File

@ -1,4 +1,3 @@
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { electronAPI } from '@electron-toolkit/preload'
import { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import { SpanContext } from '@opentelemetry/api'
@ -14,6 +13,7 @@ import {
FileUploadResponse,
KnowledgeBaseParams,
KnowledgeItem,
KnowledgeSearchResult,
MCPServer,
MemoryConfig,
MemoryListOptions,
@ -166,7 +166,8 @@ const api = {
selectFolder: (options?: OpenDialogOptions) => ipcRenderer.invoke(IpcChannel.File_SelectFolder, options),
saveImage: (name: string, data: string) => ipcRenderer.invoke(IpcChannel.File_SaveImage, name, data),
binaryImage: (fileId: string) => ipcRenderer.invoke(IpcChannel.File_BinaryImage, fileId),
base64Image: (fileId: string) => ipcRenderer.invoke(IpcChannel.File_Base64Image, fileId),
base64Image: (fileId: string): Promise<{ mime: string; base64: string; data: string }> =>
ipcRenderer.invoke(IpcChannel.File_Base64Image, fileId),
saveBase64Image: (data: string) => ipcRenderer.invoke(IpcChannel.File_SaveBase64Image, data),
savePastedImage: (imageData: Uint8Array, extension?: string) =>
ipcRenderer.invoke(IpcChannel.File_SavePastedImage, imageData, extension),
@ -215,7 +216,7 @@ const api = {
create: (base: KnowledgeBaseParams, context?: SpanContext) =>
tracedInvoke(IpcChannel.KnowledgeBase_Create, context, base),
reset: (base: KnowledgeBaseParams) => ipcRenderer.invoke(IpcChannel.KnowledgeBase_Reset, base),
delete: (id: string) => ipcRenderer.invoke(IpcChannel.KnowledgeBase_Delete, id),
delete: (base: KnowledgeBaseParams, id: string) => ipcRenderer.invoke(IpcChannel.KnowledgeBase_Delete, base, id),
add: ({
base,
item,
@ -232,7 +233,7 @@ const api = {
search: ({ search, base }: { search: string; base: KnowledgeBaseParams }, context?: SpanContext) =>
tracedInvoke(IpcChannel.KnowledgeBase_Search, context, { search, base }),
rerank: (
{ search, base, results }: { search: string; base: KnowledgeBaseParams; results: ExtractChunkData[] },
{ search, base, results }: { search: string; base: KnowledgeBaseParams; results: KnowledgeSearchResult[] },
context?: SpanContext
) => tracedInvoke(IpcChannel.KnowledgeBase_Rerank, context, { search, base, results }),
checkQuota: ({ base, userId }: { base: KnowledgeBaseParams; userId: string }) =>

View File

@ -5,7 +5,7 @@
<meta name="viewport" content="initial-scale=1, width=device-width" />
<meta
http-equiv="Content-Security-Policy"
content="default-src 'self'; connect-src blob: *; script-src 'self' 'unsafe-eval' 'unsafe-inline' *; worker-src 'self' blob:; style-src 'self' 'unsafe-inline' *; font-src 'self' data: *; img-src 'self' data: file: * blob:; frame-src * file:" />
content="default-src 'self'; connect-src blob: *; script-src 'self' 'unsafe-eval' 'unsafe-inline' *; worker-src 'self' blob:; style-src 'self' 'unsafe-inline' *; font-src 'self' data: *; img-src 'self' data: file: * blob:; media-src 'self' file:; frame-src * file:" />
<title>Cherry Studio</title>
<style>

View File

@ -5,6 +5,7 @@
*/
import { loggerService } from '@logger'
import { processKnowledgeReferences } from '@renderer/services/KnowledgeService'
import { BaseTool, MCPTool, MCPToolResponse, NormalToolResponse } from '@renderer/types'
import { Chunk, ChunkType } from '@renderer/types/chunk'
import type { ProviderMetadata, ToolSet, TypedToolCall, TypedToolResult } from 'ai'
@ -252,6 +253,18 @@ export class ToolCallChunkHandler {
response: output,
toolCallId: toolCallId
}
// Tool-specific post-processing
switch (toolResponse.tool.name) {
case 'builtin_knowledge_search': {
processKnowledgeReferences(toolResponse.response?.knowledgeReferences, this.onChunk)
break
}
// Post-processing for other tools can be added here in the future
default:
break
}
// Remove from active calls (the whole instance is discarded once the interaction ends)
this.activeToolCalls.delete(toolCallId)

View File

@ -256,19 +256,27 @@ export abstract class BaseApiClient<
return defaultTimeout
}
public async getMessageContent(message: Message): Promise<string> {
public async getMessageContent(
message: Message
): Promise<{ textContent: string; imageContents: { fileId: string; fileExt: string }[] }> {
const content = getMainTextContent(message)
if (isEmpty(content)) {
return ''
return {
textContent: '',
imageContents: []
}
}
const webSearchReferences = await this.getWebSearchReferencesFromCache(message)
const knowledgeReferences = await this.getKnowledgeBaseReferencesFromCache(message)
const memoryReferences = this.getMemoryReferencesFromCache(message)
const knowledgeTextReferences = knowledgeReferences.filter((k) => k.metadata?.type !== 'image')
const knowledgeImageReferences = knowledgeReferences.filter((k) => k.metadata?.type === 'image')
// Add an offset to avoid ID conflicts
const reindexedKnowledgeReferences = knowledgeReferences.map((ref) => ({
const reindexedKnowledgeReferences = knowledgeTextReferences.map((ref) => ({
...ref,
id: ref.id + webSearchReferences.length // offset knowledge reference IDs by the number of web search references
}))
@ -277,12 +285,17 @@ export abstract class BaseApiClient<
logger.debug(`Found ${allReferences.length} references for ID: ${message.id}`, allReferences)
if (!isEmpty(allReferences)) {
const referenceContent = `\`\`\`json\n${JSON.stringify(allReferences, null, 2)}\n\`\`\``
return REFERENCE_PROMPT.replace('{question}', content).replace('{references}', referenceContent)
}
const referenceContent = `\`\`\`json\n${JSON.stringify(allReferences, null, 2)}\n\`\`\``
const imageReferences = knowledgeImageReferences.map((r) => {
return { fileId: r.metadata?.id, fileExt: r.metadata?.ext }
})
return content
return {
textContent: isEmpty(allReferences)
? content
: REFERENCE_PROMPT.replace('{question}', content).replace('{references}', referenceContent),
imageContents: isEmpty(knowledgeImageReferences) ? [] : imageReferences
}
}
/**

View File

@ -187,6 +187,10 @@ export class AnthropicAPIClient extends BaseApiClient<
}
}
private static isValidBase64ImageMediaType(mime: string): mime is Base64ImageSource['media_type'] {
return ['image/jpeg', 'image/png', 'image/gif', 'image/webp'].includes(mime)
}
/**
* Get the message parameter
* @param message - The message
@ -194,13 +198,34 @@ export class AnthropicAPIClient extends BaseApiClient<
* @returns The message parameter
*/
public async convertMessageToSdkParam(message: Message): Promise<AnthropicSdkMessageParam> {
const { textContent, imageContents } = await this.getMessageContent(message)
const parts: MessageParam['content'] = [
{
type: 'text',
text: await this.getMessageContent(message)
text: textContent
}
]
if (imageContents.length > 0) {
for (const imageContent of imageContents) {
const base64Data = await window.api.file.base64Image(imageContent.fileId + imageContent.fileExt)
base64Data.mime = base64Data.mime.replace('jpg', 'jpeg')
if (AnthropicAPIClient.isValidBase64ImageMediaType(base64Data.mime)) {
parts.push({
type: 'image',
source: {
data: base64Data.base64,
media_type: base64Data.mime,
type: 'base64'
}
})
} else {
logger.warn('Unsupported image type, ignored.', { mime: base64Data.mime })
}
}
}
// Get and process image blocks
const imageBlocks = findImageBlocks(message)
for (const imageBlock of imageBlocks) {

View File

@ -622,7 +622,7 @@ export class AwsBedrockAPIClient extends BaseApiClient<
}
public async convertMessageToSdkParam(message: Message): Promise<AwsBedrockSdkMessageParam> {
const content = await this.getMessageContent(message)
const { textContent, imageContents } = await this.getMessageContent(message)
const parts: Array<{
text?: string
image?: {
@ -638,8 +638,29 @@ export class AwsBedrockAPIClient extends BaseApiClient<
}> = []
// Add text content - only when it is non-empty
if (content && content.trim()) {
parts.push({ text: content })
if (textContent && textContent.trim()) {
parts.push({ text: textContent })
}
if (imageContents.length > 0) {
for (const imageContent of imageContents) {
try {
const image = await window.api.file.base64Image(imageContent.fileId + imageContent.fileExt)
const mimeType = image.mime || 'image/png'
const base64Data = image.base64
const awsImage = convertBase64ImageToAwsBedrockFormat(base64Data, mimeType)
if (awsImage) {
parts.push({ image: awsImage })
} else {
// Unsupported format, fall back to a plain-text description
parts.push({ text: `[Image: ${mimeType}]` })
}
} catch (error) {
logger.error('Error processing image:', error as Error)
parts.push({ text: '[Image processing failed]' })
}
}
}
// Process image content

View File

@ -211,7 +211,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data,
mimeType
} as Part['inlineData']
}
}
}
@ -225,8 +225,22 @@ export class GeminiAPIClient extends BaseApiClient<
// If file is not found, upload it to Gemini
const result = await window.api.fileService.upload(this.provider, file)
const remoteFile = result.originalFile?.file as File
return createPartFromUri(remoteFile.uri!, remoteFile.mimeType!)
const remoteFile = result.originalFile
if (!remoteFile) {
throw new Error('File upload failed, please try again')
}
if (remoteFile.type === 'gemini') {
const file = remoteFile.file
if (!file.uri) {
throw new Error('File URI is required but not found')
}
if (!file.mimeType) {
throw new Error('File MIME type is required but not found')
}
return createPartFromUri(file.uri, file.mimeType)
} else {
throw new Error('Unsupported file type for Gemini API')
}
}
/**
@ -236,7 +250,20 @@ export class GeminiAPIClient extends BaseApiClient<
*/
private async convertMessageToSdkParam(message: Message): Promise<Content> {
const role = message.role === 'user' ? 'user' : 'model'
const parts: Part[] = [{ text: await this.getMessageContent(message) }]
const { textContent, imageContents } = await this.getMessageContent(message)
const parts: Part[] = [{ text: textContent }]
if (imageContents.length > 0) {
for (const imageContent of imageContents) {
const image = await window.api.file.base64Image(imageContent.fileId + imageContent.fileExt)
parts.push({
inlineData: {
data: image.base64,
mimeType: image.mime
} satisfies Part['inlineData']
})
}
}
// Add any generated images from previous responses
const imageBlocks = findImageBlocks(message)
@ -256,7 +283,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data: base64Data,
mimeType: mimeType
} as Part['inlineData']
} satisfies Part['inlineData']
})
}
}
@ -269,7 +296,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data: base64Data.base64,
mimeType: base64Data.mime
} as Part['inlineData']
} satisfies Part['inlineData']
})
}
}
@ -283,7 +310,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data: base64Data.base64,
mimeType: base64Data.mime
} as Part['inlineData']
} satisfies Part['inlineData']
})
}
@ -327,7 +354,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data: base64Data,
mimeType: mimeType
} as Part['inlineData']
} satisfies Part['inlineData']
})
}
}
@ -340,7 +367,7 @@ export class GeminiAPIClient extends BaseApiClient<
inlineData: {
data: base64Data.base64,
mimeType: base64Data.mime
} as Part['inlineData']
} satisfies Part['inlineData']
})
}
}
@ -355,7 +382,7 @@ export class GeminiAPIClient extends BaseApiClient<
* @returns The safety settings
*/
private getSafetySettings(): SafetySetting[] {
const safetyThreshold = 'OFF' as HarmBlockThreshold
const safetyThreshold = HarmBlockThreshold.OFF
return [
{
@ -419,7 +446,7 @@ export class GeminiAPIClient extends BaseApiClient<
thinkingConfig: {
...(budget > 0 ? { thinkingBudget: budget } : {}),
includeThoughts: true
} as ThinkingConfig
} satisfies ThinkingConfig
}
}
@ -496,9 +523,7 @@ export class GeminiAPIClient extends BaseApiClient<
const isFirstMessage = history.length === 0
if (isFirstMessage && messageContents) {
const userMessageText =
messageContents.parts && messageContents.parts.length > 0
? (messageContents.parts[0] as Part).text || ''
: ''
messageContents.parts && messageContents.parts.length > 0 ? (messageContents.parts[0].text ?? '') : ''
const systemMessage = [
{
text:
@ -509,7 +534,7 @@ export class GeminiAPIClient extends BaseApiClient<
userMessageText +
'<end_of_turn>'
}
] as Part[]
] satisfies Part[]
if (messageContents && messageContents.parts) {
messageContents.parts[0] = systemMessage[0]
}
@ -580,7 +605,7 @@ export class GeminiAPIClient extends BaseApiClient<
if (isFirstThinkingChunk) {
controller.enqueue({
type: ChunkType.THINKING_START
} as ThinkingStartChunk)
} satisfies ThinkingStartChunk)
isFirstThinkingChunk = false
}
controller.enqueue({
@ -591,7 +616,7 @@ export class GeminiAPIClient extends BaseApiClient<
if (isFirstTextChunk) {
controller.enqueue({
type: ChunkType.TEXT_START
} as TextStartChunk)
} satisfies TextStartChunk)
isFirstTextChunk = false
}
controller.enqueue({
@ -624,7 +649,7 @@ export class GeminiAPIClient extends BaseApiClient<
results: candidate.groundingMetadata,
source: WebSearchSource.GEMINI
}
} as LLMWebSearchCompleteChunk)
} satisfies LLMWebSearchCompleteChunk)
}
if (toolCalls.length > 0) {
controller.enqueue({
@ -681,7 +706,7 @@ export class GeminiAPIClient extends BaseApiClient<
tool: mcpTool,
arguments: parsedArgs,
status: 'pending'
} as ToolCallResponse
} satisfies ToolCallResponse
}
public convertMcpToolResponseToSdkMessageParam(

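Note: the recurring change in this file from "as Part['inlineData']" (and "as ThinkingConfig", "as Part[]", and so on) to "satisfies" swaps an unchecked cast for a checked assertion: the object literal is validated against the target type without being widened. A small self-contained illustration; the InlineData interface below is a stand-in, not the SDK type:

interface InlineData {
  data: string
  mimeType: string
}

// With an `as` cast, a typo such as `mimetype` would be silently accepted by the compiler.
// With `satisfies`, the literal is checked against InlineData but keeps its narrow inferred type.
const inline = {
  data: 'aGVsbG8=',
  mimeType: 'image/png'
} satisfies InlineData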
View File

@ -379,32 +379,40 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
*/
public async convertMessageToSdkParam(message: Message, model: Model): Promise<OpenAISdkMessageParam> {
const isVision = isVisionModel(model)
const content = await this.getMessageContent(message)
const { textContent, imageContents } = await this.getMessageContent(message)
const fileBlocks = findFileBlocks(message)
const imageBlocks = findImageBlocks(message)
if (fileBlocks.length === 0 && imageBlocks.length === 0) {
return {
role: message.role === 'system' ? 'user' : message.role,
content
} as OpenAISdkMessageParam
}
// If the model does not support files, extract the file content
if (this.isNotSupportFiles) {
const fileContent = await this.extractFileContent(message)
return {
role: message.role === 'system' ? 'user' : message.role,
content: content + '\n\n---\n\n' + fileContent
content: textContent + '\n\n---\n\n' + fileContent
} as OpenAISdkMessageParam
}
// Check if we only have text content and no other media
if (fileBlocks.length === 0 && imageBlocks.length === 0 && imageContents.length === 0) {
return {
role: message.role === 'system' ? 'user' : message.role,
content: textContent
} as OpenAISdkMessageParam
}
// If the model supports files, add the file content to the message
const parts: ChatCompletionContentPart[] = []
if (content) {
parts.push({ type: 'text', text: content })
if (textContent) {
parts.push({ type: 'text', text: textContent })
}
if (imageContents.length > 0) {
for (const imageContent of imageContents) {
const image = await window.api.file.base64Image(imageContent.fileId + imageContent.fileExt)
parts.push({ type: 'image_url', image_url: { url: image.data } })
}
}
for (const imageBlock of imageBlocks) {

View File

@ -171,32 +171,43 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
public async convertMessageToSdkParam(message: Message, model: Model): Promise<OpenAIResponseSdkMessageParam> {
const isVision = isVisionModel(model)
const content = await this.getMessageContent(message)
const { textContent, imageContents } = await this.getMessageContent(message)
const fileBlocks = findFileBlocks(message)
const imageBlocks = findImageBlocks(message)
if (fileBlocks.length === 0 && imageBlocks.length === 0) {
if (fileBlocks.length === 0 && imageBlocks.length === 0 && imageContents.length === 0) {
if (message.role === 'assistant') {
return {
role: 'assistant',
content: content
content: textContent
}
} else {
return {
role: message.role === 'system' ? 'user' : message.role,
content: content ? [{ type: 'input_text', text: content }] : []
content: textContent ? [{ type: 'input_text', text: textContent }] : []
} as OpenAI.Responses.EasyInputMessage
}
}
const parts: OpenAI.Responses.ResponseInputContent[] = []
if (content) {
if (imageContents) {
parts.push({
type: 'input_text',
text: content
text: textContent
})
}
if (imageContents.length > 0) {
for (const imageContent of imageContents) {
const image = await window.api.file.base64Image(imageContent.fileId + imageContent.fileExt)
parts.push({
detail: 'auto',
type: 'input_image',
image_url: image.data
})
}
}
for (const imageBlock of imageBlocks) {
if (isVision) {
if (imageBlock.file) {

View File

@ -102,7 +102,8 @@ Call this tool to execute the search. You can optionally provide additional cont
content: ref.content,
sourceUrl: ref.sourceUrl,
type: ref.type,
file: ref.file
file: ref.file,
metadata: ref.metadata
}))
// const referenceContent = `\`\`\`json\n${JSON.stringify(knowledgeReferencesData, null, 2)}\n\`\`\``

Binary file not shown. (image, 16 KiB)

View File

@ -0,0 +1,206 @@
import { UploadOutlined } from '@ant-design/icons'
import FileManager from '@renderer/services/FileManager'
import { loggerService } from '@renderer/services/LoggerService'
import { FileMetadata } from '@renderer/types'
import { mime2type, uuid } from '@renderer/utils'
import { Modal, Space, Upload } from 'antd'
import type { UploadFile } from 'antd/es/upload/interface'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
import { TopView } from '../TopView'
const logger = loggerService.withContext('Video Popup')
const { Dragger } = Upload
export interface VideoUploadResult {
videoFile: FileMetadata
srtFile: FileMetadata
}
interface VideoPopupShowParams {
title: string
}
interface Props extends VideoPopupShowParams {
resolve: (value: VideoUploadResult | null) => void
}
type UploadType = 'video' | 'srt'
interface SingleFileUploaderProps {
uploadType: UploadType
accept: string
title: string
hint: string
fileList: UploadFile[]
onUpload: (file: File) => void
onRemove: () => void
}
const SingleFileUploader: React.FC<SingleFileUploaderProps> = ({
uploadType,
accept,
title,
hint,
fileList,
onUpload,
onRemove
}) => {
const { t } = useTranslation()
return (
<div>
<div style={{ marginBottom: '8px', fontWeight: 'bold' }}>{title}</div>
<Dragger
name={uploadType}
accept={accept}
maxCount={1}
fileList={fileList}
customRequest={({ file }) => {
if (file instanceof File) {
onUpload(file)
} else {
logger.error('Upload failed: Invalid file format')
}
}}
onRemove={onRemove}>
<p className="ant-upload-drag-icon">
<UploadOutlined />
</p>
<p className="ant-upload-text">{t('knowledge.drag_file')}</p>
<p className="ant-upload-hint">{hint}</p>
</Dragger>
</div>
)
}
const VideoPopupContainer: React.FC<Props> = ({ title, resolve }) => {
const [open, setOpen] = useState(true)
const [result, setResult] = useState<VideoUploadResult | null>(null)
const [videoFile, setVideoFile] = useState<FileMetadata | null>(null)
const [srtFile, setSrtFile] = useState<FileMetadata | null>(null)
const [videoFileList, setVideoFileList] = useState<UploadFile[]>([])
const [srtFileList, setSrtFileList] = useState<UploadFile[]>([])
const { t } = useTranslation()
const handleFileUpload = async (
file: File,
uploadType: UploadType,
setFile: (data: FileMetadata | null) => void,
setFileList: (list: UploadFile[]) => void
) => {
const tempId = uuid()
const tempFile: UploadFile = {
uid: tempId,
name: file.name,
status: 'uploading'
}
setFileList([tempFile])
try {
const newFileMetadata: FileMetadata = {
id: uuid(),
name: file.name,
path: window.api.file.getPathForFile(file),
size: file.size,
ext: `.${file.name.split('.').pop()?.toLowerCase()}`,
count: 1,
origin_name: file.name,
type: mime2type(file.type),
created_at: new Date().toISOString()
}
const uploadedFile = await FileManager.uploadFile(newFileMetadata)
setFile(uploadedFile)
setFileList([{ ...tempFile, status: 'done', url: uploadedFile.path }])
} catch (error) {
logger.error(`Failed to upload ${uploadType} file: ${error}`)
setFileList([{ ...tempFile, status: 'error', response: '上传失败' }])
setFile(null)
}
}
const handleFileRemove = (
setFile: (data: FileMetadata | null) => void,
setFileList: (list: UploadFile[]) => void
) => {
setFile(null)
setFileList([])
return true
}
const onOk = () => {
if (videoFile && srtFile) {
setResult({ videoFile, srtFile })
setOpen(false)
}
}
const onCancel = () => {
setResult(null)
setOpen(false)
}
const onAfterClose = () => {
resolve(result)
TopView.hide(TopViewKey)
}
VideoPopup.hide = onCancel
const isOkButtonDisabled = !videoFile || !srtFile
return (
<Modal
title={title}
open={open}
onOk={onOk}
onCancel={onCancel}
afterClose={onAfterClose}
transitionName="animation-move-down"
centered
width={600}
okButtonProps={{ disabled: isOkButtonDisabled }}
okText={t('common.confirm')}
cancelText={t('common.cancel')}>
<Space direction="vertical" style={{ width: '100%', gap: '16px' }}>
<SingleFileUploader
uploadType="video"
accept="video/*"
title={t('knowledge.videos_file')}
hint={t('knowledge.file_hint', { file_types: 'MP4, AVI, MKV, MOV' })}
fileList={videoFileList}
onUpload={(file) => handleFileUpload(file, 'video', setVideoFile, setVideoFileList)}
onRemove={() => handleFileRemove(setVideoFile, setVideoFileList)}
/>
<SingleFileUploader
uploadType="srt"
accept=".srt"
title={t('knowledge.subtitle_file')}
hint={t('knowledge.file_hint', { file_types: 'SRT' })}
fileList={srtFileList}
onUpload={(file) => handleFileUpload(file, 'srt', setSrtFile, setSrtFileList)}
onRemove={() => handleFileRemove(setSrtFile, setSrtFileList)}
/>
</Space>
</Modal>
)
}
const TopViewKey = 'VideoPopup'
export default class VideoPopup {
static topviewId = 0
static hide() {
TopView.hide(TopViewKey)
}
static show(props: VideoPopupShowParams) {
return new Promise<VideoUploadResult | null>((resolve) => {
TopView.show(<VideoPopupContainer {...props} resolve={resolve} />, TopViewKey)
})
}
}

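Note: a plausible call site for the popup above, based only on its static show() API; the handler name, the title string, and passing both files to addVideo are assumptions, not part of this diff:

// Assumed to live in a knowledge-base page component that already has addVideo from useKnowledge.
async function handleAddVideo(addVideo: (files: FileMetadata[]) => void) {
  // Resolves to null when the user cancels the dialog.
  const result = await VideoPopup.show({ title: 'Add video' })
  if (result) {
    addVideo([result.videoFile, result.srtFile])
  }
}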
View File

@ -1,6 +1,7 @@
import {
BuiltinOcrProvider,
BuiltinOcrProviderId,
OcrPpocrProvider,
OcrProviderCapability,
OcrSystemProvider,
OcrTesseractProvider,
@ -37,9 +38,22 @@ const systemOcr: OcrSystemProvider = {
}
} as const satisfies OcrSystemProvider
const ppocrOcr: OcrPpocrProvider = {
id: 'paddleocr',
name: 'PaddleOCR',
config: {
apiUrl: ''
},
capabilities: {
image: true
// pdf: true
}
} as const
export const BUILTIN_OCR_PROVIDERS_MAP = {
tesseract,
system: systemOcr
system: systemOcr,
paddleocr: ppocrOcr
} as const satisfies Record<BuiltinOcrProviderId, BuiltinOcrProvider>
export const BUILTIN_OCR_PROVIDERS: BuiltinOcrProvider[] = Object.values(BUILTIN_OCR_PROVIDERS_MAP)

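Note: the new paddleocr entry only carries an empty apiUrl by default. A minimal sketch of how a client might call a PaddleOCR-style serving endpoint; the request and response field names and the bearer-token header are assumptions (the actual contract depends on how the service is deployed, per the serving documentation referenced in the settings strings):

// Assumed endpoint contract: JSON POST with a base64-encoded image and an optional AI Studio access token.
async function recognizeImage(apiUrl: string, imageBase64: string, accessToken?: string): Promise<string> {
  const response = await fetch(apiUrl, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      ...(accessToken ? { Authorization: `Bearer ${accessToken}` } : {})
    },
    body: JSON.stringify({ file: imageBase64, fileType: 1 }) // field names are illustrative
  })
  if (!response.ok) {
    throw new Error(`OCR request failed with status ${response.status}`)
  }
  const data = await response.json()
  // Assume the payload contains an array of recognized text lines; join them into plain text.
  return Array.isArray(data?.result?.texts) ? data.result.texts.join('\n') : ''
}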
View File

@ -1,4 +1,10 @@
import { CustomTranslateLanguage, FileMetadata, KnowledgeItem, QuickPhrase, TranslateHistory } from '@renderer/types'
import {
CustomTranslateLanguage,
FileMetadata,
KnowledgeNoteItem,
QuickPhrase,
TranslateHistory
} from '@renderer/types'
// Import necessary types for blocks and new message structure
import type { Message as NewMessage, MessageBlock } from '@renderer/types/newMessage'
import { NotesTreeNode } from '@renderer/types/note'
@ -13,7 +19,7 @@ export const db = new Dexie('CherryStudio', {
files: EntityTable<FileMetadata, 'id'>
topics: EntityTable<{ id: string; messages: NewMessage[] }, 'id'> // Correct type for topics
settings: EntityTable<{ id: string; value: any }, 'id'>
knowledge_notes: EntityTable<KnowledgeItem, 'id'>
knowledge_notes: EntityTable<KnowledgeNoteItem, 'id'>
translate_history: EntityTable<TranslateHistory, 'id'>
quick_phrases: EntityTable<QuickPhrase, 'id'>
message_blocks: EntityTable<MessageBlock, 'id'> // Correct type for message_blocks

View File

@ -15,21 +15,32 @@ import {
updateItemProcessingStatus,
updateNotes
} from '@renderer/store/knowledge'
import { addFilesThunk, addItemThunk, addNoteThunk } from '@renderer/store/thunk/knowledgeThunk'
import { FileMetadata, KnowledgeBase, KnowledgeItem, ProcessingStatus } from '@renderer/types'
import { runAsyncFunction } from '@renderer/utils'
import { addFilesThunk, addItemThunk, addNoteThunk, addVedioThunk } from '@renderer/store/thunk/knowledgeThunk'
import {
FileMetadata,
isKnowledgeFileItem,
isKnowledgeNoteItem,
isKnowledgeVideoItem,
KnowledgeBase,
KnowledgeItem,
KnowledgeNoteItem,
MigrationModeEnum,
ProcessingStatus
} from '@renderer/types'
import { runAsyncFunction, uuid } from '@renderer/utils'
import dayjs from 'dayjs'
import { cloneDeep } from 'lodash'
import { useCallback, useEffect, useRef, useState } from 'react'
import { useCallback, useEffect, useState } from 'react'
import { useDispatch, useSelector } from 'react-redux'
import { useAgents } from './useAgents'
import { useAssistants } from './useAssistant'
import { useTimer } from './useTimer'
export const useKnowledge = (baseId: string) => {
const dispatch = useAppDispatch()
const base = useSelector((state: RootState) => state.knowledge.bases.find((b) => b.id === baseId))
const checkTimerRef = useRef<NodeJS.Timeout>(undefined)
const { setTimeoutTimer } = useTimer()
// Rename the knowledge base
const renameKnowledgeBase = (name: string) => {
@ -41,16 +52,11 @@ export const useKnowledge = (baseId: string) => {
dispatch(updateBase(base))
}
useEffect(() => {
return () => {
clearTimeout(checkTimerRef.current)
}
}, [])
// Check knowledge bases
const checkAllBases = () => {
clearTimeout(checkTimerRef.current)
checkTimerRef.current = setTimeout(() => KnowledgeQueue.checkAllBases(), 0)
// This might also end up scheduling multiple tasks?
const id = uuid()
setTimeoutTimer(id, () => KnowledgeQueue.checkAllBases(), 0)
}
// Add files in batch
@ -82,6 +88,13 @@ export const useKnowledge = (baseId: string) => {
dispatch(addItemThunk(baseId, 'directory', path))
checkAllBases()
}
// add video support
const addVideo = (files: FileMetadata[]) => {
dispatch(addVedioThunk(baseId, 'video', files))
checkAllBases()
}
// Update note content
const updateNoteContent = async (noteId: string, content: string) => {
const note = await db.knowledge_notes.get(noteId)
@ -110,18 +123,28 @@ export const useKnowledge = (baseId: string) => {
// Remove an item
const removeItem = async (item: KnowledgeItem) => {
dispatch(removeItemAction({ baseId, item }))
if (base) {
if (item?.uniqueId && item?.uniqueIds) {
await window.api.knowledgeBase.remove({
uniqueId: item.uniqueId,
uniqueIds: item.uniqueIds,
base: getKnowledgeBaseParams(base)
})
}
if (!base || !item?.uniqueId || !item?.uniqueIds) {
return
}
if (item.type === 'file' && typeof item.content === 'object') {
const removalParams = {
uniqueId: item.uniqueId,
uniqueIds: item.uniqueIds,
base: getKnowledgeBaseParams(base)
}
await window.api.knowledgeBase.remove(removalParams)
if (isKnowledgeFileItem(item) && typeof item.content === 'object' && !Array.isArray(item.content)) {
const file = item.content
// name: e.g. text.pdf
await window.api.file.delete(item.content.name)
await window.api.file.delete(file.name)
} else if (isKnowledgeVideoItem(item)) {
// video item has srt and video files
const files = item.content
const deletePromises = files.map((file) => window.api.file.delete(file.name))
await Promise.allSettled(deletePromises)
}
}
// Refresh an item
@ -132,6 +155,9 @@ export const useKnowledge = (baseId: string) => {
return
}
if (!base || !item?.uniqueId || !item?.uniqueIds) {
return
}
if (base && item.uniqueId && item.uniqueIds) {
await window.api.knowledgeBase.remove({
uniqueId: item.uniqueId,
@ -148,6 +174,24 @@ export const useKnowledge = (baseId: string) => {
})
checkAllBases()
}
const removalParams = {
uniqueId: item.uniqueId,
uniqueIds: item.uniqueIds,
base: getKnowledgeBaseParams(base)
}
await window.api.knowledgeBase.remove(removalParams)
updateItem({
...item,
processingStatus: 'pending',
processingProgress: 0,
processingError: '',
uniqueId: undefined,
updated_at: Date.now()
})
setTimeout(() => KnowledgeQueue.checkAllBases(), 0)
}
// Update processing status
@ -187,7 +231,7 @@ export const useKnowledge = (baseId: string) => {
}
// Migrate the knowledge base (the original base is kept)
const migrateBase = async (newBase: KnowledgeBase) => {
const migrateBase = async (newBase: KnowledgeBase, mode: MigrationModeEnum) => {
if (!base) return
const timestamp = dayjs().format('YYMMDDHHmmss')
@ -200,8 +244,13 @@ export const useKnowledge = (baseId: string) => {
name: newName,
created_at: Date.now(),
updated_at: Date.now(),
items: []
} as KnowledgeBase
items: [],
framework: mode === MigrationModeEnum.MigrationToLangChain ? 'langchain' : base.framework
} satisfies KnowledgeBase
if (mode === MigrationModeEnum.MigrationToLangChain) {
await window.api.knowledgeBase.create(getKnowledgeBaseParams(migratedBase))
}
dispatch(addBase(migratedBase))
@ -212,23 +261,27 @@ export const useKnowledge = (baseId: string) => {
switch (item.type) {
case 'file':
if (typeof item.content === 'object' && item.content !== null && 'path' in item.content) {
files.push(item.content as FileMetadata)
files.push(item.content)
}
break
case 'note':
try {
const note = await db.knowledge_notes.get(item.id)
const content = (note?.content || '') as string
const content = note?.content || ''
await dispatch(addNoteThunk(newBase.id, content))
} catch (error) {
throw new Error(`Failed to migrate note item ${item.id}: ${error}`)
}
break
default:
try {
dispatch(addItemThunk(newBase.id, item.type, item.content as string))
} catch (error) {
throw new Error(`Failed to migrate item ${item.id}: ${error}`)
if (typeof item.content === 'string') {
try {
dispatch(addItemThunk(newBase.id, item.type, item.content))
} catch (error) {
throw new Error(`Failed to migrate item ${item.id}: ${error}`)
}
} else {
throw new Error(`Not a valid item: ${JSON.stringify(item)}`)
}
break
}
@ -250,17 +303,18 @@ export const useKnowledge = (baseId: string) => {
const urlItems = base?.items.filter((item) => item.type === 'url') || []
const sitemapItems = base?.items.filter((item) => item.type === 'sitemap') || []
const [noteItems, setNoteItems] = useState<KnowledgeItem[]>([])
const videoItems = base?.items.filter((item) => item.type === 'video') || []
useEffect(() => {
const notes = base?.items.filter((item) => item.type === 'note') || []
const notes = base?.items.filter(isKnowledgeNoteItem) ?? []
runAsyncFunction(async () => {
const newNoteItems = await Promise.all(
notes.map(async (item) => {
const note = await db.knowledge_notes.get(item.id)
return { ...item, content: note?.content || '' }
return { ...item, content: note?.content ?? '' } satisfies KnowledgeNoteItem
})
)
setNoteItems(newNoteItems.filter((note) => note !== undefined) as KnowledgeItem[])
setNoteItems(newNoteItems)
})
}, [base?.items])
@ -270,6 +324,7 @@ export const useKnowledge = (baseId: string) => {
urlItems,
sitemapItems,
noteItems,
videoItems,
renameKnowledgeBase,
updateKnowledgeBase,
migrateBase,
@ -277,6 +332,7 @@ export const useKnowledge = (baseId: string) => {
addUrl,
addSitemap,
addNote,
addVideo,
updateNoteContent,
getNoteContent,
updateItem,
@ -307,7 +363,9 @@ export const useKnowledgeBases = () => {
}
const deleteKnowledgeBase = (baseId: string) => {
dispatch(deleteBase({ baseId }))
const base = bases.find((b) => b.id === baseId)
if (!base) return
dispatch(deleteBase({ baseId, baseParams: getKnowledgeBaseParams(base) }))
// remove assistant knowledge_base
const _assistants = assistants.map((assistant) => {

View File

@ -4,7 +4,7 @@ import { useProviders } from '@renderer/hooks/useProvider'
import { getModelUniqId } from '@renderer/services/ModelService'
import { KnowledgeBase } from '@renderer/types'
import { nanoid } from 'nanoid'
import { useCallback, useMemo, useState } from 'react'
import { useCallback, useEffect, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
const createInitialKnowledgeBase = (): KnowledgeBase => ({
@ -14,7 +14,11 @@ const createInitialKnowledgeBase = (): KnowledgeBase => ({
items: [],
created_at: Date.now(),
updated_at: Date.now(),
version: 1
version: 1,
framework: 'langchain',
retriever: {
mode: 'hybrid'
}
})
/**
@ -41,6 +45,12 @@ export const useKnowledgeBaseForm = (base?: KnowledgeBase) => {
const { providers } = useProviders()
const { preprocessProviders } = usePreprocessProviders()
useEffect(() => {
if (base) {
setNewBase(base)
}
}, [base])
const selectedDocPreprocessProvider = useMemo(
() => newBase.preprocessProvider?.provider,
[newBase.preprocessProvider]

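Note: new bases now default to framework 'langchain' with retriever mode 'hybrid', and the retriever_hybrid_weight strings added in the locale files describe a full-text/vector weight slider. A minimal sketch of the weighted score fusion such a slider implies; the names and the [0, 1] normalization are illustrative, not taken from this diff:

interface ScoredDoc {
  id: string
  score: number // assumed already normalized to [0, 1]
}

// w = 1 means pure full-text (BM25) ranking, w = 0 means pure vector ranking.
function hybridMerge(bm25: ScoredDoc[], vector: ScoredDoc[], w: number): ScoredDoc[] {
  const combined = new Map<string, number>()
  for (const doc of bm25) combined.set(doc.id, w * doc.score)
  for (const doc of vector) combined.set(doc.id, (combined.get(doc.id) ?? 0) + (1 - w) * doc.score)
  return [...combined.entries()]
    .map(([id, score]) => ({ id, score }))
    .sort((a, b) => b.score - a.score)
}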
View File

@ -1,4 +1,5 @@
import { loggerService } from '@logger'
import PaddleocrLogo from '@renderer/assets/images/providers/paddleocr.png'
import TesseractLogo from '@renderer/assets/images/providers/Tesseract.js.png'
import { BUILTIN_OCR_PROVIDERS_MAP, DEFAULT_OCR_PROVIDER } from '@renderer/config/ocr'
import { getBuiltinOcrProviderLabel } from '@renderer/i18n/label'
@ -80,6 +81,8 @@ export const useOcrProviders = () => {
return <Avatar size={size} src={TesseractLogo} />
case 'system':
return <MonitorIcon size={size} />
case 'paddleocr':
return <Avatar size={size} src={PaddleocrLogo} />
}
}
return <FileQuestionMarkIcon size={size} />

View File

@ -116,7 +116,7 @@ export function useMessageStyle() {
}
}
export const getStoreSetting = (key: keyof SettingsState) => {
export const getStoreSetting = <K extends keyof SettingsState>(key: K): SettingsState[K] => {
return store.getState().settings[key]
}

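Note: making getStoreSetting generic ties the return type to the key, so callers no longer have to cast away the union SettingsState[keyof SettingsState]. A self-contained illustration with a made-up slice of SettingsState:

// Illustrative slice only; the real SettingsState has many more fields.
interface SettingsState {
  topicPosition: 'left' | 'right'
  fontSize: number
}

declare function getStoreSetting<K extends keyof SettingsState>(key: K): SettingsState[K]

const position = getStoreSetting('topicPosition') // inferred as 'left' | 'right'
const fontSize = getStoreSetting('fontSize') // inferred as number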
View File

@ -45,7 +45,7 @@ export const useTimer = () => {
// Automatically clear all timers when the component unmounts
useEffect(() => {
return clearAllTimers
return () => clearAllTimers()
}, [])
/**

View File

@ -327,10 +327,12 @@ export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {
const builtinOcrProviderKeyMap = {
system: 'ocr.builtin.system',
tesseract: ''
tesseract: '',
paddleocr: ''
} as const satisfies Record<BuiltinOcrProviderId, string>
export const getBuiltinOcrProviderLabel = (key: BuiltinOcrProviderId) => {
if (key === 'tesseract') return 'Tesseract'
else if (key === 'paddleocr') return 'PaddleOCR'
else return getLabel(builtinOcrProviderKeyMap, key)
}

View File

@ -963,9 +963,11 @@
},
"add_directory": "Add Directory",
"add_file": "Add File",
"add_image": "Add Image",
"add_note": "Add Note",
"add_sitemap": "Website Map",
"add_url": "Add URL",
"add_video": "Add video",
"cancel_index": "Cancel Indexing",
"chunk_overlap": "Chunk Overlap",
"chunk_overlap_placeholder": "Default (not recommended to change)",
@ -992,6 +994,7 @@
"document_count_default": "Default",
"document_count_help": "The more document chunks requested, the more information is included, but the more tokens are consumed",
"drag_file": "Drag file here",
"drag_image": "Drag image here",
"edit_remark": "Edit Remark",
"edit_remark_placeholder": "Please enter remark content",
"embedding_model": "Embedding Model",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Knowledge base creation failed",
"failed_to_edit": "Knowledge base editing failed",
"model_invalid": "No model selected"
"model_invalid": "No model selected",
"video": {
"local_file_missing": "Video file not found",
"youtube_url_missing": "YouTube video URL not found"
}
},
"file_hint": "Support {{file_types}}",
"image_hint": "Support {{image_types}}",
"images": "Images",
"index_all": "Index All",
"index_cancelled": "Indexing cancelled",
"index_started": "Indexing started",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Migration failed"
},
"migrate_to_langchain": {
"content": "The knowledge base migration does not delete the old knowledge base but creates a copy and reprocesses all entries, which may consume a significant number of tokens. Please proceed with caution.",
"info": "The knowledge base architecture has been updated. Click to migrate to the new architecture."
},
"source_dimensions": "Source Dimensions",
"source_model": "Source Model",
"target_dimensions": "Target Dimensions",
@ -1037,6 +1050,20 @@
"quota": "{{name}} Left Quota: {{quota}}",
"quota_infinity": "{{name}} Quota: Unlimited",
"rename": "Rename",
"retriever": "Retrieve mode",
"retriever_bm25": "full-text search",
"retriever_bm25_desc": "Search for documents based on keyword relevance and frequency.",
"retriever_hybrid": "Hybrid Search (Recommended)",
"retriever_hybrid_desc": "Combine keyword search and semantic search to achieve optimal retrieval accuracy.",
"retriever_hybrid_weight": {
"bm25": "full text",
"recommended": "recommend",
"title": "Hybrid Search Weight Adjustment (Full-text/Vector)",
"vector": "vector"
},
"retriever_tooltip": "Using different retrieval methods to search the knowledge base",
"retriever_vector": "vector search",
"retriever_vector_desc": "Retrieve documents based on semantic similarity and meaning.",
"search": "Search knowledge base",
"search_placeholder": "Enter text to search",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Preprocessing Completed",
"status_preprocess_failed": "Preprocessing Failed",
"status_processing": "Processing",
"subtitle_file": "subtitle file",
"threshold": "Matching threshold",
"threshold_placeholder": "Not set",
"threshold_too_large_or_small": "Threshold cannot be greater than 1 or less than 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "The number of matching results returned; the larger the value, the more matching results, but also the more tokens consumed.",
"url_added": "URL added",
"url_placeholder": "Enter URL, multiple URLs separated by Enter",
"urls": "URLs"
"urls": "URLs",
"videos": "video",
"videos_file": "video file"
},
"languages": {
"arabic": "Arabic",
@ -1376,6 +1406,13 @@
"bubble": "Bubble",
"label": "Message style",
"plain": "Plain"
},
"video": {
"error": {
"local_file_missing": "Local video file path not found",
"unsupported_type": "Unsupported video type",
"youtube_url_missing": "YouTube video URL not found"
}
}
},
"processing": "Processing...",
@ -3884,6 +3921,13 @@
"title": "Image"
},
"image_provider": "OCR service provider",
"paddleocr": {
"aistudio_access_token": "Access token of AI Studio Community",
"aistudio_url_label": "AI Studio Community",
"api_url": "API URL",
"serving_doc_url_label": "PaddleOCR Serving Documentation",
"tip": "You can refer to the official PaddleOCR documentation to deploy a local service, or deploy a cloud service on the PaddlePaddle AI Studio Community. For the latter case, please provide the access token of the AI Studio Community."
},
"system": {
"win": {
"langs_tooltip": "Dependent on Windows to provide services, you need to download language packs in the system to support the relevant languages."
@ -4224,4 +4268,4 @@
"show_window": "Show Window",
"visualization": "Visualization"
}
}
}

View File

@ -963,9 +963,11 @@
},
"add_directory": "添加目录",
"add_file": "添加文件",
"add_image": "添加图片",
"add_note": "添加笔记",
"add_sitemap": "站点地图",
"add_url": "添加网址",
"add_video": "添加视频",
"cancel_index": "取消索引",
"chunk_overlap": "重叠大小",
"chunk_overlap_placeholder": "默认值(不建议修改)",
@ -992,6 +994,7 @@
"document_count_default": "默认",
"document_count_help": "请求文档片段数量越多,附带的信息越多,但需要消耗的 Token 也越多",
"drag_file": "拖拽文件到这里",
"drag_image": "拖拽图片到这里",
"edit_remark": "修改备注",
"edit_remark_placeholder": "请输入备注内容",
"embedding_model": "嵌入模型",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "知识库创建失败",
"failed_to_edit": "知识库编辑失败",
"model_invalid": "未选择模型"
"model_invalid": "未选择模型",
"video": {
"local_file_missing": "视频文件不存在",
"youtube_url_missing": "YouTube 视频链接不存在"
}
},
"file_hint": "支持 {{file_types}} 格式",
"image_hint": "支持 {{image_types}} 格式",
"images": "图片",
"index_all": "索引全部",
"index_cancelled": "索引已取消",
"index_started": "索引开始",
@ -1019,6 +1028,10 @@
"error": {
"failed": "迁移失败"
},
"migrate_to_langchain": {
"content": "知识库迁移不会删除旧知识库,而是创建一个副本之后重新处理所有知识库条目,可能消耗大量 tokens请谨慎操作。",
"info": "知识库架构已更新,点击迁移到新架构"
},
"source_dimensions": "源维度",
"source_model": "源模型",
"target_dimensions": "目标维度",
@ -1037,6 +1050,20 @@
"quota": "{{name}} 剩余额度:{{quota}}",
"quota_infinity": "{{name}} 剩余额度:无限制",
"rename": "重命名",
"retriever": "检索模式",
"retriever_bm25": "全文搜索",
"retriever_bm25_desc": "根据关键字的相关性和频率查找文档。",
"retriever_hybrid": "混合搜索 (推荐)",
"retriever_hybrid_desc": "结合关键词搜索和语义搜索,以实现最佳检索准确性。",
"retriever_hybrid_weight": {
"bm25": "全文",
"recommended": "推荐",
"title": "混合搜索权重调整 (全文/向量)",
"vector": "向量"
},
"retriever_tooltip": "使用不同的检索方式检索知识库",
"retriever_vector": "向量搜索",
"retriever_vector_desc": "根据语义相似性和含义查找文档。",
"search": "搜索知识库",
"search_placeholder": "输入查询内容",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "预处理完成",
"status_preprocess_failed": "预处理失败",
"status_processing": "处理中",
"subtitle_file": "字幕文件",
"threshold": "匹配度阈值",
"threshold_placeholder": "未设置",
"threshold_too_large_or_small": "阈值不能大于 1 或小于 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "返回的匹配结果数量,数值越大,匹配结果越多,但消耗的 Token 也越多",
"url_added": "网址已添加",
"url_placeholder": "请输入网址, 多个网址用回车分隔",
"urls": "网址"
"urls": "网址",
"videos": "视频",
"videos_file": "视频文件"
},
"languages": {
"arabic": "阿拉伯文",
@ -1376,6 +1406,13 @@
"bubble": "气泡",
"label": "消息样式",
"plain": "简洁"
},
"video": {
"error": {
"local_file_missing": "本地视频文件路径不存在",
"unsupported_type": "不支持的视频类型",
"youtube_url_missing": "YouTube 视频链接不存在"
}
}
},
"processing": "正在处理...",
@ -3884,6 +3921,13 @@
"title": "图片"
},
"image_provider": "OCR 服务提供商",
"paddleocr": {
"aistudio_access_token": "星河社区访问令牌",
"aistudio_url_label": "星河社区",
"api_url": "API URL",
"serving_doc_url_label": "PaddleOCR 服务化部署文档",
"tip": "您可以参考 PaddleOCR 官方文档部署本地服务,或者在飞桨星河社区部署云服务。对于后一种情况,请填写星河社区访问令牌。"
},
"system": {
"win": {
"langs_tooltip": "依赖 Windows 提供服务,您需要在系统中下载语言包来支持相关语言。"

View File

@ -963,9 +963,11 @@
},
"add_directory": "新增目錄",
"add_file": "新增檔案",
"add_image": "新增圖片",
"add_note": "新增筆記",
"add_sitemap": "網站地圖",
"add_url": "新增網址",
"add_video": "新增影片",
"cancel_index": "取消索引",
"chunk_overlap": "重疊大小",
"chunk_overlap_placeholder": "預設值(不建議修改)",
@ -992,6 +994,7 @@
"document_count_default": "預設",
"document_count_help": "請求文件片段數量越多,附帶的資訊越多,但需要消耗的 Token 也越多",
"drag_file": "拖拽檔案到這裡",
"drag_image": "拖曳圖片到這裡",
"edit_remark": "修改備註",
"edit_remark_placeholder": "請輸入備註內容",
"embedding_model": "嵌入模型",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "知識庫創建失敗",
"failed_to_edit": "知識庫編輯失敗",
"model_invalid": "未選擇模型"
"model_invalid": "未選擇模型",
"video": {
"local_file_missing": "影片檔案不存在",
"youtube_url_missing": "YouTube 影片連結不存在"
}
},
"file_hint": "支援 {{file_types}} 格式",
"image_hint": "支援 {{image_types}} 格式",
"images": "圖片",
"index_all": "索引全部",
"index_cancelled": "索引已取消",
"index_started": "索引開始",
@ -1019,6 +1028,10 @@
"error": {
"failed": "遷移失敗"
},
"migrate_to_langchain": {
"content": "知識庫遷移不會刪除舊知識庫,而是建立一個副本後重新處理所有知識庫條目,可能消耗大量 tokens請謹慎操作。",
"info": "知識庫架構已更新,點擊遷移到新架構"
},
"source_dimensions": "源維度",
"source_model": "源模型",
"target_dimensions": "目標維度",
@ -1037,6 +1050,20 @@
"quota": "{{name}} 剩餘配額:{{quota}}",
"quota_infinity": "{{name}} 配額:無限制",
"rename": "重新命名",
"retriever": "搜尋模式",
"retriever_bm25": "全文搜尋",
"retriever_bm25_desc": "根據關鍵字的相關性和頻率查找文件。",
"retriever_hybrid": "混合搜尋(推薦)",
"retriever_hybrid_desc": "結合關鍵字搜索和語義搜索,以實現最佳檢索準確性。",
"retriever_hybrid_weight": {
"bm25": "全文",
"recommended": "推薦",
"title": "混合搜尋權重調整 (全文/向量)",
"vector": "向量"
},
"retriever_tooltip": "使用不同的檢索方式檢索知識庫",
"retriever_vector": "向量搜尋",
"retriever_vector_desc": "根據語意相似性和含義查找文件。",
"search": "搜尋知識庫",
"search_placeholder": "輸入查詢內容",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "預處理完成",
"status_preprocess_failed": "預處理失敗",
"status_processing": "處理中",
"subtitle_file": "字幕檔案",
"threshold": "匹配度閾值",
"threshold_placeholder": "未設定",
"threshold_too_large_or_small": "閾值不能大於 1 或小於 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "返回的匹配結果數量,數值越大,匹配結果越多,但消耗的 Token 也越多",
"url_added": "網址已新增",
"url_placeholder": "請輸入網址,多個網址用換行符號分隔",
"urls": "網址"
"urls": "網址",
"videos": "影片",
"videos_file": "影片檔案"
},
"languages": {
"arabic": "阿拉伯文",
@ -1376,6 +1406,13 @@
"bubble": "氣泡",
"label": "訊息樣式",
"plain": "簡潔"
},
"video": {
"error": {
"local_file_missing": "本地視頻檔案路徑不存在",
"unsupported_type": "不支援的視頻類型",
"youtube_url_missing": "YouTube 視頻連結不存在"
}
}
},
"processing": "正在處理...",
@ -3884,6 +3921,13 @@
"title": "圖片"
},
"image_provider": "OCR 服務提供商",
"paddleocr": {
"aistudio_access_token": "星河社群存取權杖",
"aistudio_url_label": "星河社群",
"api_url": "API 網址",
"serving_doc_url_label": "PaddleOCR 服務化部署文件",
"tip": "您可以參考 PaddleOCR 官方文件來部署本機服務,或是在飛槳星河社群部署雲端服務。對於後者,請提供星河社群的存取權杖。"
},
"system": {
"win": {
"langs_tooltip": "依賴 Windows 提供服務,您需要在系統中下載語言包來支援相關語言。"
@ -4224,4 +4268,4 @@
"show_window": "顯示視窗",
"visualization": "視覺化"
}
}
}

View File

@ -963,9 +963,11 @@
},
"add_directory": "Προσθήκη καταλόγου",
"add_file": "Προσθήκη αρχείου",
"add_image": "Προσθήκη εικόνας",
"add_note": "Προσθήκη σημειώματος",
"add_sitemap": "Χάρτης τόπων",
"add_url": "Προσθήκη διευθύνσεως",
"add_video": "Προσθήκη βίντεο",
"cancel_index": "Άκυρη ευρετήριοποίηση",
"chunk_overlap": "Μέγεθος επιφάνειας",
"chunk_overlap_placeholder": "Προεπιλογή (δεν συνιστάται να το αλλάξετε)",
@ -992,6 +994,7 @@
"document_count_default": "Προεπιλογή",
"document_count_help": "Όσο μεγαλύτερη είναι η ποσότητα των κειμένων που ζητούνται, τόσο περισσότερες πληροφορίες παρέχονται, αλλά και οι καταναλωτικοί Token επειδή περισσότερα",
"drag_file": "Βάλτε το αρχείο εδώ",
"drag_image": "Σύρετε την εικόνα εδώ",
"edit_remark": "Μεταβολή σημειώματος",
"edit_remark_placeholder": "Εισάγετε το σημείωμα",
"embedding_model": "Μοντέλο ενσωμάτωσης",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Αποτυχία δημιουργίας βάσης γνώσεων",
"failed_to_edit": "Αποτυχία επεξεργασίας βάσης γνώσεων",
"model_invalid": "Δεν έχει επιλεγεί μοντέλο ή έχει διαγραφεί"
"model_invalid": "Δεν έχει επιλεγεί μοντέλο ή έχει διαγραφεί",
"video": {
"local_file_missing": "Το αρχείο βίντεο δεν υπάρχει",
"youtube_url_missing": "Ο σύνδεσμος βίντεο του YouTube δεν υπάρχει"
}
},
"file_hint": "Υποστηρίζεται το {{file_types}} μορφάττων",
"image_hint": "Υποστηρίζει μορφές {{image_types}}",
"images": "εικόνα",
"index_all": "Ευρετήριοποίηση όλων",
"index_cancelled": "Η ευρετήριοποίηση διακόπηκε",
"index_started": "Η ευρετήριοποίηση ξεκίνησε",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Αποτυχία μεταφοράς"
},
"migrate_to_langchain": {
"content": "Η μετανάστευση της βάσης γνώσεων δεν διαγράφει την παλιά βάση γνώσεων, αλλά δημιουργεί ένα αντίγραφο και στη συνέχεια επεξεργάζεται ξανά όλες τις εγγραφές της βάσης γνώσεων, κάτι που μπορεί να καταναλώσει μεγάλο αριθμό tokens, οπότε ενεργήστε με προσοχή.",
"info": "Η δομή της βάσης γνώσεων έχει ενημερωθεί, κάντε κλικ για μετεγκατάσταση στη νέα δομή"
},
"source_dimensions": "Πηγαίες διαστάσεις",
"source_model": "Πηγαίο μοντέλο",
"target_dimensions": "Προορισμένες διαστάσεις",
@ -1037,6 +1050,20 @@
"quota": "Διαθέσιμο όριο για {{name}}: {{quota}}",
"quota_infinity": "Διαθέσιμο όριο για {{name}}: Απεριόριστο",
"rename": "Μετονομασία",
"retriever": "Λειτουργία αναζήτησης",
"retriever_bm25": "Πλήρης αναζήτηση κειμένου",
"retriever_bm25_desc": "Αναζήτηση εγγράφων με βάση τη σχετικότητα και τη συχνότητα των λέξεων-κλειδιών.",
"retriever_hybrid": "Μικτή αναζήτηση (συνιστάται)",
"retriever_hybrid_desc": "Συνδυάστε την αναζήτηση με λέξεις-κλειδιά και την σημασιολογική αναζήτηση για την επίτευξη της βέλτιστης ακρίβειας ανάκτησης.",
"retriever_hybrid_weight": {
"bm25": "ολόκληρο το κείμενο",
"recommended": "Προτείνω",
"title": "Προσαρμογή βάρους μικτής αναζήτησης (πλήρες κείμενο/διανυσματικό)",
"vector": "διάνυσμα"
},
"retriever_tooltip": "Χρησιμοποιώντας διαφορετικές μεθόδους αναζήτησης για αναζήτηση στη βάση γνώσης",
"retriever_vector": "Αναζήτηση διανυσμάτων",
"retriever_vector_desc": "Βρείτε έγγραφα βάση της σημασιολογικής ομοιότητας και της έννοιας.",
"search": "Αναζήτηση βάσης γνώσεων",
"search_placeholder": "Εισάγετε την αναζήτηση",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Η προεπεξεργασία ολοκληρώθηκε",
"status_preprocess_failed": "Η προεπεξεργασία απέτυχε",
"status_processing": "Επεξεργασία",
"subtitle_file": "[to be translated]:字幕文件",
"threshold": "Περιθώριο συνάφειας",
"threshold_placeholder": "Δεν έχει ρυθμιστεί",
"threshold_too_large_or_small": "Το περιθώριο δεν μπορεί να είναι μεγαλύτερο από 1 ή μικρότερο από 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "Η ποσότητα των επιστρεφόμενων αποτελεσμάτων που συνάφονται, όσο μεγαλύτερη είναι η τιμή, τόσο περισσότερα αποτελέσματα συνδέονται, αλλά και οι καταναλωτικοί Token επειδή περισσότερα",
"url_added": "Η διεύθυνση προστέθηκε",
"url_placeholder": "Εισάγετε τη διεύθυνση, χωρίστε πολλαπλές διευθύνσεις με επιστροφή",
"urls": "Διευθύνσεις"
"urls": "Διευθύνσεις",
"videos": "βίντεο",
"videos_file": "[to be translated]:视频文件"
},
"languages": {
"arabic": "Αραβικά",
@ -1376,6 +1406,13 @@
"bubble": "Αερογεύματα",
"label": "Στυλ μηνύματος",
"plain": "Απλός"
},
"video": {
"error": {
"local_file_missing": "Η διαδρομή του τοπικού αρχείου βίντεο δεν υπάρχει",
"unsupported_type": "Μη υποστηριζόμενος τύπος βίντεο",
"youtube_url_missing": "Ο σύνδεσμος βίντεο του YouTube δεν υπάρχει"
}
}
},
"processing": "Επεξεργασία...",
@ -3884,6 +3921,13 @@
"title": "Εικόνα"
},
"image_provider": "Πάροχοι υπηρεσιών OCR",
"paddleocr": {
"aistudio_access_token": "Διακριτικό πρόσβασης της κοινότητας AI Studio",
"aistudio_url_label": "Κοινότητα AI Studio",
"api_url": "Διεύθυνση URL API",
"serving_doc_url_label": "Τεκμηρίωση PaddleOCR Serving",
"tip": "Μπορείτε να ανατρέξετε στην επίσημη τεκμηρίωση του PaddleOCR για να αναπτύξετε μια τοπική υπηρεσία, ή να αναπτύξετε μια υπηρεσία στο cloud στην Κοινότητα PaddlePaddle AI Studio. Στη δεύτερη περίπτωση, παρακαλώ παρέχετε το διακριτικό πρόσβασης (access token) της Κοινότητας AI Studio."
},
"system": {
"win": {
"langs_tooltip": "Εξαρτάται από τα Windows για την παροχή υπηρεσιών, πρέπει να κατεβάσετε το πακέτο γλώσσας στο σύστημα για να υποστηρίξετε τις σχετικές γλώσσες."

View File

@ -963,9 +963,11 @@
},
"add_directory": "Agregar directorio",
"add_file": "Agregar archivo",
"add_image": "Añadir imagen",
"add_note": "Agregar nota",
"add_sitemap": "Mapa del sitio",
"add_url": "Agregar URL",
"add_video": "Añadir video",
"cancel_index": "Cancelar índice",
"chunk_overlap": "Superposición de fragmentos",
"chunk_overlap_placeholder": "Valor predeterminado (no recomendado para modificar)",
@ -992,6 +994,7 @@
"document_count_default": "Predeterminado",
"document_count_help": "Más fragmentos de documentos solicitados significa más información adjunta, pero también consume más tokens",
"drag_file": "Arrastre archivos aquí",
"drag_image": "Arrastra la imagen aquí",
"edit_remark": "Editar observación",
"edit_remark_placeholder": "Ingrese el contenido de la observación",
"embedding_model": "Modelo de incrustación",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Error al crear la base de conocimientos",
"failed_to_edit": "Error al editar la base de conocimientos",
"model_invalid": "No se ha seleccionado un modelo o ha sido eliminado"
"model_invalid": "No se ha seleccionado un modelo o ha sido eliminado",
"video": {
"local_file_missing": "El archivo de video no existe.",
"youtube_url_missing": "El enlace del vídeo de YouTube no existe."
}
},
"file_hint": "Formatos soportados: {{file_types}}",
"image_hint": "Soporta formatos {{image_types}}",
"images": "Imagen",
"index_all": "Indexar todo",
"index_cancelled": "Índice cancelado",
"index_started": "Índice iniciado",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Error en la migración"
},
"migrate_to_langchain": {
"content": "La migración de la base de conocimiento no elimina la base antigua, sino que crea una copia y luego reprocesa todas las entradas, lo que puede consumir una gran cantidad de tokens. Proceda con precaución.",
"info": "La estructura de la base de conocimiento ha sido actualizada. Haz clic para migrar a la nueva estructura."
},
"source_dimensions": "Dimensiones de origen",
"source_model": "Modelo de origen",
"target_dimensions": "Dimensiones de destino",
@ -1037,6 +1050,20 @@
"quota": "Cupo restante de {{name}}: {{quota}}",
"quota_infinity": "Cupo restante de {{name}}: ilimitado",
"rename": "Renombrar",
"retriever": "modo de recuperación",
"retriever_bm25": "búsqueda de texto completo",
"retriever_bm25_desc": "Encontrar documentos basados en la relevancia y frecuencia de las palabras clave.",
"retriever_hybrid": "Búsqueda híbrida (recomendada)",
"retriever_hybrid_desc": "Combinar la búsqueda por palabras clave con la búsqueda semántica para lograr la máxima precisión en la recuperación.",
"retriever_hybrid_weight": {
"bm25": "texto completo",
"recommended": "Recomendado",
"title": "Ajuste de ponderación en búsqueda híbrida (texto completo/vectorial)",
"vector": "vector"
},
"retriever_tooltip": "Usar diferentes métodos de búsqueda para consultar la base de conocimiento",
"retriever_vector": "búsqueda vectorial",
"retriever_vector_desc": "Buscar documentos según similitud semántica y significado.",
"search": "Buscar en la base de conocimientos",
"search_placeholder": "Ingrese el contenido de la consulta",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Preprocesamiento completado",
"status_preprocess_failed": "Error en el preprocesamiento",
"status_processing": "Procesando",
"subtitle_file": "[to be translated]:字幕文件",
"threshold": "Umbral de coincidencia",
"threshold_placeholder": "No configurado",
"threshold_too_large_or_small": "El umbral no puede ser mayor que 1 o menor que 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "Número de resultados coincidentes devueltos, un valor más alto significa más resultados coincidentes, pero también consume más tokens",
"url_added": "URL agregada",
"url_placeholder": "Ingrese la URL, múltiples URLs separadas por enter",
"urls": "URLs"
"urls": "URLs",
"videos": "vídeo",
"videos_file": "[to be translated]:视频文件"
},
"languages": {
"arabic": "Árabe",
@ -1376,6 +1406,13 @@
"bubble": "Burbuja",
"label": "Estilo de mensaje",
"plain": "Simple"
},
"video": {
"error": {
"local_file_missing": "Ruta del archivo de video local no encontrada",
"unsupported_type": "Tipo de video no soportado",
"youtube_url_missing": "URL del video de YouTube no encontrada"
}
}
},
"processing": "Procesando...",
@ -3884,6 +3921,13 @@
"title": "Imagen"
},
"image_provider": "Proveedor de servicios OCR",
"paddleocr": {
"aistudio_access_token": "Token de acceso de la comunidad de AI Studio",
"aistudio_url_label": "Comunidad de AI Studio",
"api_url": "URL de la API",
"serving_doc_url_label": "Documentación de PaddleOCR Serving",
"tip": "Puede consultar la documentación oficial de PaddleOCR para implementar un servicio local, o implementar un servicio en la nube en la Comunidad de PaddlePaddle AI Studio. En este último caso, proporcione el token de acceso de la Comunidad de AI Studio."
},
"system": {
"win": {
"langs_tooltip": "Dependiendo de Windows para proporcionar servicios, necesita descargar el paquete de idioma en el sistema para admitir los idiomas correspondientes."

View File

@ -963,9 +963,11 @@
},
"add_directory": "Ajouter un répertoire",
"add_file": "Ajouter un fichier",
"add_image": "Ajouter une image",
"add_note": "Ajouter une note",
"add_sitemap": "Plan du site",
"add_url": "Ajouter une URL",
"add_video": "Ajouter une vidéo",
"cancel_index": "Annuler l'indexation",
"chunk_overlap": "Chevauchement de blocs",
"chunk_overlap_placeholder": "Valeur par défaut (ne pas modifier)",
@ -992,6 +994,7 @@
"document_count_default": "Par défaut",
"document_count_help": "Plus vous demandez de fragments de documents, plus d'informations sont fournies, mais plus de jetons sont consommés",
"drag_file": "Glissez-déposez un fichier ici",
"drag_image": "Faites glisser l'image ici",
"edit_remark": "Modifier la remarque",
"edit_remark_placeholder": "Entrez le contenu de la remarque",
"embedding_model": "Modèle d'intégration",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Erreur lors de la création de la base de connaissances",
"failed_to_edit": "Erreur lors de la modification de la base de connaissances",
"model_invalid": "Aucun modèle sélectionné ou modèle supprimé"
"model_invalid": "Aucun modèle sélectionné ou modèle supprimé",
"video": {
"local_file_missing": "Le fichier vidéo n'existe pas.",
"youtube_url_missing": "Le lien de la vidéo YouTube n'existe pas."
}
},
"file_hint": "Format supporté : {{file_types}}",
"image_hint": "Prise en charge des formats {{image_types}}",
"images": "Image",
"index_all": "Indexer tout",
"index_cancelled": "L'indexation a été annulée",
"index_started": "L'indexation a commencé",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Erreur lors de la migration"
},
"migrate_to_langchain": {
"content": "La migration de la base de connaissances ne supprime pas l'ancienne base, mais crée une copie avant de retraiter tous les éléments, ce qui peut consommer un grand nombre de tokens. Veuillez agir avec prudence.",
"info": "L'architecture de la base de connaissances a été mise à jour, cliquez pour migrer vers la nouvelle architecture."
},
"source_dimensions": "Dimensions source",
"source_model": "Modèle source",
"target_dimensions": "Dimensions cible",
@ -1037,6 +1050,20 @@
"quota": "Quota restant pour {{name}} : {{quota}}",
"quota_infinity": "Quota restant pour {{name}} : illimité",
"rename": "Renommer",
"retriever": "Mode de recherche",
"retriever_bm25": "Recherche plein texte",
"retriever_bm25_desc": "Rechercher des documents en fonction de la pertinence et de la fréquence des mots-clés.",
"retriever_hybrid": "Recherche hybride (recommandé)",
"retriever_hybrid_desc": "Associez la recherche par mots-clés et la recherche sémantique pour une précision de recherche optimale.",
"retriever_hybrid_weight": {
"bm25": "texte intégral",
"recommended": "Recommandé",
"title": "Ajustement des pondérations de recherche hybride (texte intégral/vecteur)",
"vector": "vecteur"
},
"retriever_tooltip": "Utiliser différentes méthodes de recherche pour interroger la base de connaissances",
"retriever_vector": "Recherche vectorielle",
"retriever_vector_desc": "Rechercher des documents selon la similarité sémantique et le sens.",
"search": "Rechercher dans la base de connaissances",
"search_placeholder": "Entrez votre requête",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Prétraitement terminé",
"status_preprocess_failed": "Échec du prétraitement",
"status_processing": "En cours de traitement",
"subtitle_file": "[to be translated]:字幕文件",
"threshold": "Seuil de similarité",
"threshold_placeholder": "Non défini",
"threshold_too_large_or_small": "Le seuil ne peut pas être supérieur à 1 ou inférieur à 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "Nombre de résultats de correspondance retournés, plus le chiffre est élevé, plus il y a de résultats de correspondance, mais plus de jetons sont consommés",
"url_added": "URL ajoutée",
"url_placeholder": "Entrez l'URL, plusieurs URLs séparées par des sauts de ligne",
"urls": "URLs"
"urls": "URLs",
"videos": "vidéo",
"videos_file": "[to be translated]:视频文件"
},
"languages": {
"arabic": "Arabe",
@ -1376,6 +1406,13 @@
"bubble": "Bulles",
"label": "Style du message",
"plain": "Simplifié"
},
"video": {
"error": {
"local_file_missing": "Chemin du fichier vidéo local introuvable",
"unsupported_type": "Type de vidéo non supporté",
"youtube_url_missing": "URL de la vidéo YouTube introuvable"
}
}
},
"processing": "En cours de traitement...",
@ -3884,6 +3921,13 @@
"title": "Image"
},
"image_provider": "Fournisseur de service OCR",
"paddleocr": {
"aistudio_access_token": "Jeton daccès de la communauté AI Studio",
"aistudio_url_label": "Communauté AI Studio",
"api_url": "URL de lAPI",
"serving_doc_url_label": "Documentation de PaddleOCR Serving",
"tip": "Vous pouvez consulter la documentation officielle de PaddleOCR pour déployer un service local, ou déployer un service cloud sur la Communauté PaddlePaddle AI Studio. Dans ce dernier cas, veuillez fournir le jeton daccès de la Communauté AI Studio."
},
"system": {
"win": {
"langs_tooltip": "Dépendre de Windows pour fournir des services, vous devez télécharger des packs linguistiques dans le système afin de prendre en charge les langues concernées."

View File

@ -963,9 +963,11 @@
},
"add_directory": "ディレクトリを追加",
"add_file": "ファイルを追加",
"add_image": "画像を追加",
"add_note": "ノートを追加",
"add_sitemap": "サイトマップを追加",
"add_url": "URLを追加",
"add_video": "動画を追加",
"cancel_index": "インデックスをキャンセル",
"chunk_overlap": "チャンクの重なり",
"chunk_overlap_placeholder": "デフォルト(変更しないでください)",
@ -992,6 +994,7 @@
"document_count_default": "デフォルト",
"document_count_help": "要求されたドキュメント分段数が多いほど、付随する情報が多くなりますが、トークンの消費量も増加します",
"drag_file": "ファイルをここにドラッグ",
"drag_image": "画像をここにドラッグ",
"edit_remark": "備考を編集",
"edit_remark_placeholder": "備考内容を入力してください",
"embedding_model": "埋め込みモデル",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "ナレッジベースの作成に失敗しました",
"failed_to_edit": "ナレッジベースの編集に失敗しました",
"model_invalid": "モデルが選択されていません"
"model_invalid": "モデルが選択されていません",
"video": {
"local_file_missing": "動画ファイルが見つかりません",
"youtube_url_missing": "YouTube動画のURLが見つかりません"
}
},
"file_hint": "{{file_types}} 形式をサポート",
"image_hint": "{{image_types}} 形式に対応しています",
"images": "画像",
"index_all": "すべてをインデックス",
"index_cancelled": "インデックスがキャンセルされました",
"index_started": "インデックスを開始",
@ -1019,6 +1028,10 @@
"error": {
"failed": "移行が失敗しました"
},
"migrate_to_langchain": {
"content": "ナレッジベースの移行は旧ナレッジベースを削除せず、すべてのエントリーを再処理したコピーを作成します。大量のトークンを消費する可能性があるため、操作には十分注意してください。",
"info": "ナレッジベースのアーキテクチャが更新されました、新しいアーキテクチャに移行するにはクリックしてください"
},
"source_dimensions": "ソース次元",
"source_model": "ソースモデル",
"target_dimensions": "ターゲット次元",
@ -1037,6 +1050,20 @@
"quota": "{{name}} 残りクォータ: {{quota}}",
"quota_infinity": "{{name}} クォータ: 無制限",
"rename": "名前を変更",
"retriever": "検索モード",
"retriever_bm25": "全文検索",
"retriever_bm25_desc": "キーワードの関連性と頻度に基づいてドキュメントを検索します。",
"retriever_hybrid": "ハイブリッド検索(おすすめ)",
"retriever_hybrid_desc": "キーワード検索と意味検索を組み合わせて、最高の検索精度を実現します。",
"retriever_hybrid_weight": {
"bm25": "全文(ぜんぶん)",
"recommended": "おすすめ",
"title": "ハイブリッド検索の重み付け調整 (全文/ベクトル)",
"vector": "ベクトル"
},
"retriever_tooltip": "異なる検索方法を使用してナレッジベースを検索する",
"retriever_vector": "ベクトル検索",
"retriever_vector_desc": "意味的な類似性と意味に基づいて文書を検索します。",
"search": "ナレッジベースを検索",
"search_placeholder": "検索するテキストを入力",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "前処理完了",
"status_preprocess_failed": "前処理に失敗しました",
"status_processing": "処理中",
"subtitle_file": "字幕ファイル",
"threshold": "マッチング度閾値",
"threshold_placeholder": "未設置",
"threshold_too_large_or_small": "しきい値は0より大きく1より小さい必要があります",
@ -1069,7 +1097,9 @@
"topN_tooltip": "返されるマッチ結果の数は、数値が大きいほどマッチ結果が多くなりますが、消費されるトークンも増えます。",
"url_added": "URLが追加されました",
"url_placeholder": "URLを入力, 複数のURLはEnterで区切る",
"urls": "URL"
"urls": "URL",
"videos": "動画",
"videos_file": "動画ファイル"
},
"languages": {
"arabic": "アラビア語",
@ -1376,6 +1406,13 @@
"bubble": "バブル",
"label": "メッセージスタイル",
"plain": "プレーン"
},
"video": {
"error": {
"local_file_missing": "ローカル動画ファイルのパスが見つかりません",
"unsupported_type": "サポートされていない動画タイプです",
"youtube_url_missing": "YouTube動画のURLが見つかりません"
}
}
},
"processing": "処理中...",
@ -3884,6 +3921,13 @@
"title": "画像"
},
"image_provider": "OCRサービスプロバイダー",
"paddleocr": {
"aistudio_access_token": "AI Studio Community のアクセス・トークン",
"aistudio_url_label": "AI Studio Community",
"api_url": "API URL",
"serving_doc_url_label": "PaddleOCR サービング ドキュメント",
"tip": "ローカルサービスをデプロイするには、公式の PaddleOCR ドキュメントを参照するか、PaddlePaddle AI Studio コミュニティ上でクラウドサービスをデプロイすることができます。後者の場合は、AI Studio コミュニティのアクセストークンを提供してください。"
},
"system": {
"win": {
"langs_tooltip": "Windows が提供するサービスに依存しており、関連する言語をサポートするには、システムで言語パックをダウンロードする必要があります。"
@ -4224,4 +4268,4 @@
"show_window": "ウィンドウを表示",
"visualization": "可視化"
}
}
}

View File

@ -963,9 +963,11 @@
},
"add_directory": "Adicionar diretório",
"add_file": "Adicionar arquivo",
"add_image": "Adicionar imagens",
"add_note": "Adicionar nota",
"add_sitemap": "Adicionar mapa do site",
"add_url": "Adicionar URL",
"add_video": "Adicionar vídeo",
"cancel_index": "Cancelar índice",
"chunk_overlap": "Sobreposição de bloco",
"chunk_overlap_placeholder": "Valor padrão (não recomendado alterar)",
@ -992,6 +994,7 @@
"document_count_default": "Padrão",
"document_count_help": "Quanto mais fragmentos de documentos solicitados, mais informações são incluídas, mas mais tokens são consumidos",
"drag_file": "Arraste o arquivo aqui",
"drag_image": "Arraste a imagem para aqui",
"edit_remark": "Editar observação",
"edit_remark_placeholder": "Digite o conteúdo da observação",
"embedding_model": "Modelo de incorporação",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Falha ao criar o repositório de conhecimento",
"failed_to_edit": "Falha ao editar o repositório de conhecimento",
"model_invalid": "Modelo não selecionado ou eliminado"
"model_invalid": "Modelo não selecionado ou eliminado",
"video": {
"local_file_missing": "O ficheiro de vídeo não existe.",
"youtube_url_missing": "O link do vídeo do YouTube não existe."
}
},
"file_hint": "Formatos suportados: {{file_types}}",
"image_hint": "Suporta formatos {{image_types}}",
"images": "imagem",
"index_all": "Índice total",
"index_cancelled": "Índice cancelado",
"index_started": "Índice iniciado",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Falha na migração"
},
"migrate_to_langchain": {
"content": "A migração da base de conhecimento não elimina a base antiga, mas sim cria uma cópia e reprocessa todas as entradas, o que pode consumir muitos tokens. Por favor, proceda com cautela.",
"info": "A arquitetura da base de conhecimento foi atualizada, clique para migrar para a nova arquitetura."
},
"source_dimensions": "Dimensões de origem",
"source_model": "Modelo de origem",
"target_dimensions": "Dimensões de destino",
@ -1037,6 +1050,20 @@
"quota": "Cota restante de {{name}}: {{quota}}",
"quota_infinity": "Cota restante de {{name}}: ilimitada",
"rename": "Renomear",
"retriever": "Modo de pesquisa",
"retriever_bm25": "pesquisa de texto completo",
"retriever_bm25_desc": "Pesquisar documentos com base na relevância e frequência das palavras-chave.",
"retriever_hybrid": "Pesquisa híbrida (recomendada)",
"retriever_hybrid_desc": "Combine a pesquisa por palavras-chave com a pesquisa semântica para alcançar a melhor precisão de recuperação.",
"retriever_hybrid_weight": {
"bm25": "texto integral",
"recommended": "Recomendar",
"title": "Ajuste de ponderação de pesquisa híbrida (texto completo/vetorial)",
"vector": "vetor"
},
"retriever_tooltip": "Utilize diferentes métodos de pesquisa para consultar a base de conhecimento.",
"retriever_vector": "pesquisa vetorial",
"retriever_vector_desc": "Encontrar documentos com base na similaridade semântica e significado.",
"search": "Pesquisar repositório de conhecimento",
"search_placeholder": "Digite o conteúdo da consulta",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Pré-processamento concluído",
"status_preprocess_failed": "Falha no pré-processamento",
"status_processing": "Processando",
"subtitle_file": "[to be translated]:字幕文件",
"threshold": "Limite de correspondência",
"threshold_placeholder": "Não definido",
"threshold_too_large_or_small": "O limite não pode ser maior que 1 ou menor que 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "Número de resultados correspondentes retornados, quanto maior o valor, mais resultados correspondentes, mas mais tokens são consumidos",
"url_added": "URL adicionada",
"url_placeholder": "Digite a URL, várias URLs separadas por enter",
"urls": "URLs"
"urls": "URLs",
"videos": "vídeo",
"videos_file": "[to be translated]:视频文件"
},
"languages": {
"arabic": "Árabe",
@ -1376,6 +1406,13 @@
"bubble": "Bolha",
"label": "Estilo da mensagem",
"plain": "Simples"
},
"video": {
"error": {
"local_file_missing": "O caminho do arquivo de vídeo local não existe.",
"unsupported_type": "Tipo de vídeo não suportado",
"youtube_url_missing": "O link do vídeo do YouTube não existe."
}
}
},
"processing": "Processando...",
@ -3884,6 +3921,13 @@
"title": "Imagem"
},
"image_provider": "Provedor de serviços OCR",
"paddleocr": {
"aistudio_access_token": "Token de acesso da comunidade AI Studio",
"aistudio_url_label": "Comunidade AI Studio",
"api_url": "URL da API",
"serving_doc_url_label": "Documentação do PaddleOCR Serving",
"tip": "Você pode consultar a documentação oficial do PaddleOCR para implantar um serviço local ou implantar um serviço na nuvem na Comunidade PaddlePaddle AI Studio. No último caso, forneça o token de acesso da Comunidade AI Studio."
},
"system": {
"win": {
"langs_tooltip": "Dependendo do Windows para fornecer serviços, você precisa baixar pacotes de idiomas no sistema para dar suporte aos idiomas relevantes."

View File

@ -963,9 +963,11 @@
},
"add_directory": "Добавить директорию",
"add_file": "Добавить файл",
"add_image": "добавить изображение",
"add_note": "Добавить запись",
"add_sitemap": "Карта сайта",
"add_url": "Добавить URL",
"add_video": "Добавить видео",
"cancel_index": "Отменить индексирование",
"chunk_overlap": "Перекрытие фрагмента",
"chunk_overlap_placeholder": "По умолчанию (не рекомендуется изменять)",
@ -992,6 +994,7 @@
"document_count_default": "По умолчанию",
"document_count_help": "Количество запрошенных документов, вместе с ними передается больше информации, но и требуется больше токенов",
"drag_file": "Перетащите файл сюда",
"drag_image": "Перетащите изображение сюда",
"edit_remark": "Изменить примечание",
"edit_remark_placeholder": "Пожалуйста, введите содержание примечания",
"embedding_model": "Модель встраивания",
@ -1000,9 +1003,15 @@
"error": {
"failed_to_create": "Создание базы знаний завершено с ошибками",
"failed_to_edit": "Редактирование базы знаний завершено с ошибками",
"model_invalid": "Модель не выбрана"
"model_invalid": "Модель не выбрана",
"video": {
"local_file_missing": "Видеофайл не найден",
"youtube_url_missing": "URL видео YouTube не найден"
}
},
"file_hint": "Поддерживаются {{file_types}}",
"image_hint": "Поддерживаются форматы {{image_types}}",
"images": "изображение",
"index_all": "Индексировать все",
"index_cancelled": "Индексирование отменено",
"index_started": "Индексирование началось",
@ -1019,6 +1028,10 @@
"error": {
"failed": "Миграция завершена с ошибками"
},
"migrate_to_langchain": {
"content": "Миграция базы знаний не удаляет старую базу, а создает ее копию с последующей повторной обработкой всех записей, что может потребовать значительного количества токенов. Пожалуйста, действуйте осторожно.",
"info": "Архитектура базы знаний обновлена, нажмите, чтобы перейти на новую архитектуру"
},
"source_dimensions": "Исходная размерность",
"source_model": "Исходная модель",
"target_dimensions": "Целевая размерность",
@ -1037,6 +1050,20 @@
"quota": "{{name}} Остаток квоты: {{quota}}",
"quota_infinity": "{{name}} Квота: Не ограничена",
"rename": "Переименовать",
"retriever": "Режим поиска",
"retriever_bm25": "полнотекстовый поиск",
"retriever_bm25_desc": "Поиск документов на основе релевантности и частоты ключевых слов.",
"retriever_hybrid": "Гибридный поиск (рекомендуется)",
"retriever_hybrid_desc": "Сочетание поиска по ключевым словам и семантического поиска для достижения оптимальной точности поиска.",
"retriever_hybrid_weight": {
"bm25": "Полный текст",
"recommended": "рекомендовать",
"title": "Регулировка весов гибридного поиска (полнотекстовый/векторный)",
"vector": "вектор"
},
"retriever_tooltip": "Использование различных методов поиска в базе знаний",
"retriever_vector": "векторный поиск",
"retriever_vector_desc": "Поиск документов по семантическому сходству и смыслу.",
"search": "Поиск в базе знаний",
"search_placeholder": "Введите текст для поиска",
"settings": {
@ -1058,6 +1085,7 @@
"status_preprocess_completed": "Предварительная обработка завершена",
"status_preprocess_failed": "Предварительная обработка не удалась",
"status_processing": "Обработка",
"subtitle_file": "Файл субтитров",
"threshold": "Порог соответствия",
"threshold_placeholder": "Не установлено",
"threshold_too_large_or_small": "Порог не может быть больше 1 или меньше 0",
@ -1069,7 +1097,9 @@
"topN_tooltip": "Количество возвращаемых совпадений; чем больше значение, тем больше совпадений, но и потребление токенов тоже возрастает.",
"url_added": "URL добавлен",
"url_placeholder": "Введите URL, несколько URL через Enter",
"urls": "URL-адреса"
"urls": "URL-адреса",
"videos": "видео",
"videos_file": "видеофайл"
},
"languages": {
"arabic": "Арабский",
@ -1376,6 +1406,13 @@
"bubble": "Пузырь",
"label": "Стиль сообщения",
"plain": "Простой"
},
"video": {
"error": {
"local_file_missing": "Путь к локальному видеофайлу не найден",
"unsupported_type": "Неподдерживаемый тип видео",
"youtube_url_missing": "URL видео YouTube не найден"
}
}
},
"processing": "Обрабатывается...",
@ -3884,6 +3921,13 @@
"title": "Изображение"
},
"image_provider": "Поставщик услуг OCR",
"paddleocr": {
"aistudio_access_token": "Токен доступа сообщества AI Studio",
"aistudio_url_label": "Сообщество AI Studio",
"api_url": "URL API",
"serving_doc_url_label": "Документация по PaddleOCR Serving",
"tip": "Вы можете обратиться к официальной документации PaddleOCR, чтобы развернуть локальный сервис, либо развернуть облачный сервис в сообществе PaddlePaddle AI Studio. В последнем случае, пожалуйста, предоставьте токен доступа сообщества AI Studio."
},
"system": {
"win": {
"langs_tooltip": "Для предоставления служб Windows необходимо загрузить языковой пакет в системе для поддержки соответствующего языка."
@ -4224,4 +4268,4 @@
"show_window": "Показать окно",
"visualization": "Визуализация"
}
}
}

View File

@ -10,8 +10,10 @@ import {
FileZipFilled,
FolderOpenFilled,
GlobalOutlined,
LinkOutlined
LinkOutlined,
VideoCameraFilled
} from '@ant-design/icons'
import { videoExts } from '@shared/config/constant'
import { Flex } from 'antd'
import React, { memo } from 'react'
import styled from 'styled-components'
@ -72,6 +74,10 @@ const getFileIcon = (type?: string) => {
return <FolderOpenFilled />
}
if (videoExts.includes(ext)) {
return <VideoCameraFilled />
}
return <FileUnknownFilled />
}

View File

@ -0,0 +1,14 @@
import type { VideoMessageBlock } from '@renderer/types/newMessage'
import React from 'react'
import MessageVideo from '../MessageVideo'
interface Props {
block: VideoMessageBlock
}
const VideoBlock: React.FC<Props> = ({ block }) => {
return <MessageVideo block={block} />
}
export default React.memo(VideoBlock)

View File

@ -1,8 +1,9 @@
import { loggerService } from '@logger'
import type { RootState } from '@renderer/store'
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
import type { ImageMessageBlock, MainTextMessageBlock, Message, MessageBlock } from '@renderer/types/newMessage'
import type { ImageMessageBlock, Message, MessageBlock } from '@renderer/types/newMessage'
import { MessageBlockStatus, MessageBlockType } from '@renderer/types/newMessage'
import { isMainTextBlock, isVideoBlock } from '@renderer/utils/messageUtils/is'
import { AnimatePresence, motion, type Variants } from 'motion/react'
import React, { useMemo } from 'react'
import { useSelector } from 'react-redux'
@ -17,6 +18,7 @@ import PlaceholderBlock from './PlaceholderBlock'
import ThinkingBlock from './ThinkingBlock'
import ToolBlock from './ToolBlock'
import TranslationBlock from './TranslationBlock'
import VideoBlock from './VideoBlock'
const logger = loggerService.withContext('MessageBlockRenderer')
@ -60,15 +62,37 @@ interface Props {
message: Message
}
const filterImageBlockGroups = (blocks: MessageBlock[]): (MessageBlock[] | MessageBlock)[] => {
const groupSimilarBlocks = (blocks: MessageBlock[]): (MessageBlock[] | MessageBlock)[] => {
return blocks.reduce((acc: (MessageBlock[] | MessageBlock)[], currentBlock) => {
if (currentBlock.type === MessageBlockType.IMAGE) {
// Group consecutive IMAGE blocks together
const prevGroup = acc[acc.length - 1]
if (Array.isArray(prevGroup) && prevGroup[0].type === MessageBlockType.IMAGE) {
prevGroup.push(currentBlock)
} else {
acc.push([currentBlock])
}
} else if (currentBlock.type === MessageBlockType.VIDEO) {
// Group VIDEO blocks that share the same filePath
if (!isVideoBlock(currentBlock)) {
logger.warn('Block type is VIDEO but failed type guard check', currentBlock)
acc.push(currentBlock)
return acc
}
const videoBlock = currentBlock
const existingGroup = acc.find(
(group) =>
Array.isArray(group) &&
group[0].type === MessageBlockType.VIDEO &&
isVideoBlock(group[0]) &&
group[0].filePath === videoBlock.filePath
) as MessageBlock[] | undefined
if (existingGroup) {
existingGroup.push(currentBlock)
} else {
acc.push([currentBlock])
}
} else {
acc.push(currentBlock)
}
@ -81,30 +105,46 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {
const blockEntities = useSelector((state: RootState) => messageBlocksSelectors.selectEntities(state))
// Build the render data according to block type
const renderedBlocks = blocks.map((blockId) => blockEntities[blockId]).filter(Boolean)
const groupedBlocks = useMemo(() => filterImageBlockGroups(renderedBlocks), [renderedBlocks])
const groupedBlocks = useMemo(() => groupSimilarBlocks(renderedBlocks), [renderedBlocks])
return (
<AnimatePresence mode="sync">
{groupedBlocks.map((block) => {
if (Array.isArray(block)) {
const groupKey = block.map((imageBlock) => imageBlock.id).join('-')
// A single image is rendered without the ImageBlockGroup wrapper
if (block.length === 1) {
const groupKey = block.map((b) => b.id).join('-')
if (block[0].type === MessageBlockType.IMAGE) {
if (block.length === 1) {
return (
<AnimatedBlockWrapper key={groupKey} enableAnimation={message.status.includes('ing')}>
<ImageBlock key={block[0].id} block={block[0] as ImageMessageBlock} isSingle={true} />
</AnimatedBlockWrapper>
)
}
// 多张图片使用 ImageBlockGroup 包装
return (
<AnimatedBlockWrapper key={groupKey} enableAnimation={message.status.includes('ing')}>
<ImageBlock key={block[0].id} block={block[0] as ImageMessageBlock} isSingle={true} />
<ImageBlockGroup count={block.length}>
{block.map((imageBlock) => (
<ImageBlock key={imageBlock.id} block={imageBlock as ImageMessageBlock} isSingle={false} />
))}
</ImageBlockGroup>
</AnimatedBlockWrapper>
)
} else if (block[0].type === MessageBlockType.VIDEO) {
// For videos sharing the same path, render only the first one
if (!isVideoBlock(block[0])) {
logger.warn('Expected video block but got different type', block[0])
return null
}
const firstVideoBlock = block[0]
return (
<AnimatedBlockWrapper key={groupKey} enableAnimation={message.status.includes('ing')}>
<VideoBlock key={firstVideoBlock.id} block={firstVideoBlock} />
</AnimatedBlockWrapper>
)
}
// Multiple images are wrapped in an ImageBlockGroup
return (
<AnimatedBlockWrapper key={groupKey} enableAnimation={message.status.includes('ing')}>
<ImageBlockGroup count={block.length}>
{block.map((imageBlock) => (
<ImageBlock key={imageBlock.id} block={imageBlock as ImageMessageBlock} isSingle={false} />
))}
</ImageBlockGroup>
</AnimatedBlockWrapper>
)
return null
}
let blockComponent: React.ReactNode = null
@ -117,11 +157,13 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {
break
case MessageBlockType.MAIN_TEXT:
case MessageBlockType.CODE: {
const mainTextBlock = block as MainTextMessageBlock
if (!isMainTextBlock(block)) {
logger.warn('Expected main text block but got different type', block)
break
}
const mainTextBlock = block
// Find the associated citation block ID from the references
const citationBlockId = mainTextBlock.citationReferences?.[0]?.citationBlockId
// No longer need to retrieve the full citation block here
// const citationBlock = citationBlockId ? (blockEntities[citationBlockId] as CitationMessageBlock) : undefined
blockComponent = (
<MainTextBlock
@ -155,6 +197,9 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {
case MessageBlockType.TRANSLATION:
blockComponent = <TranslationBlock key={block.id} block={block} />
break
case MessageBlockType.VIDEO:
blockComponent = <VideoBlock key={block.id} block={block} />
break
default:
logger.warn('Unsupported block type in MessageBlockRenderer:', (block as any).type, block)
break

View File

@ -0,0 +1,112 @@
import { loggerService } from '@renderer/services/LoggerService'
import { VideoMessageBlock } from '@renderer/types/newMessage'
import { FC, useRef } from 'react'
import { useTranslation } from 'react-i18next'
import ReactPlayer from 'react-player'
import YouTube, { YouTubeProps } from 'react-youtube'
import styled from 'styled-components'
const logger = loggerService.withContext('MessageVideo')
interface Props {
block: VideoMessageBlock
}
const MessageVideo: FC<Props> = ({ block }) => {
const playerRef = useRef<HTMLVideoElement | null>(null)
const { t } = useTranslation()
logger.debug(`MessageVideo: ${JSON.stringify(block)}`)
if (!block.url && !block.filePath) {
return null
}
/**
* YouTube
*/
const renderYoutube = () => {
if (!block.url) {
logger.warn('YouTube video was requested but block.url is missing.')
return <div>{t('message.video.error.youtube_url_missing')}</div>
}
const onPlayerReady: YouTubeProps['onReady'] = (event) => {
event.target.pauseVideo()
}
const opts: YouTubeProps['opts'] = {
height: '100%',
width: '100%',
playerVars: {
start: Math.floor(block.metadata?.startTime ?? 0)
}
}
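// Note: react-youtube's videoId prop expects a bare video ID; block.url is assumed to carry that ID rather than a full watch URL.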
return <YouTube style={{ height: '100%', width: '100%' }} videoId={block.url} opts={opts} onReady={onPlayerReady} />
}
/**
* Local video file
*/
const renderLocalVideo = () => {
if (!block.filePath) {
logger.warn('Local video was requested but block.filePath is missing.')
return <div>{t('message.video.error.local_file_missing')}</div>
}
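// Assumption: metadata.video.path points at the same local file as block.filePath; the file:// scheme lets the renderer load it directly.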
const videoSrc = `file://${block.metadata?.video?.path}`
const handleReady = () => {
const startTime = Math.floor(block.metadata?.startTime ?? 0)
if (playerRef.current) {
playerRef.current.currentTime = startTime
}
}
return (
<ReactPlayer
ref={playerRef}
style={{
height: '100%',
width: '100%'
}}
src={videoSrc}
controls
onReady={handleReady}
/>
)
}
const renderVideo = () => {
switch (block.metadata?.type) {
case 'youtube':
return renderYoutube()
case 'video':
return renderLocalVideo()
default:
if (block.filePath) {
logger.warn(
`Unknown video type: ${block.metadata?.type}; filePath is present, so falling back to local video rendering.`
)
return renderLocalVideo()
}
logger.warn(`Unsupported video type: ${block.metadata?.type}, or required data is missing.`)
return <div>{t('message.video.error.unsupported_type')}</div>
}
}
return <Container>{renderVideo()}</Container>
}
export default MessageVideo
const Container = styled.div`
max-width: 560px;
width: 100%;
aspect-ratio: 16 / 9;
height: auto;
background-color: #000;
`

View File

@ -7,19 +7,21 @@ import { NavbarIcon } from '@renderer/pages/home/ChatNavbar'
import { getProviderName } from '@renderer/services/ProviderService'
import { KnowledgeBase } from '@renderer/types'
import { Button, Empty, Tabs, Tag, Tooltip } from 'antd'
import { Book, Folder, Globe, Link, Notebook, Search, Settings } from 'lucide-react'
import { Book, Folder, Globe, Link, Notebook, Search, Settings, Video } from 'lucide-react'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import EditKnowledgeBasePopup from './components/EditKnowledgeBasePopup'
import KnowledgeSearchPopup from './components/KnowledgeSearchPopup'
import MigrationInfoTag from './components/MigrationInfoTag'
import QuotaTag from './components/QuotaTag'
import KnowledgeDirectories from './items/KnowledgeDirectories'
import KnowledgeFiles from './items/KnowledgeFiles'
import KnowledgeNotes from './items/KnowledgeNotes'
import KnowledgeSitemaps from './items/KnowledgeSitemaps'
import KnowledgeUrls from './items/KnowledgeUrls'
import KnowledgeVideos from './items/KnowledgeVideos'
const logger = loggerService.withContext('KnowledgeContent')
interface KnowledgeContentProps {
@ -28,7 +30,9 @@ interface KnowledgeContentProps {
const KnowledgeContent: FC<KnowledgeContentProps> = ({ selectedBase }) => {
const { t } = useTranslation()
const { base, urlItems, fileItems, directoryItems, noteItems, sitemapItems } = useKnowledge(selectedBase.id || '')
const { base, urlItems, fileItems, directoryItems, noteItems, sitemapItems, videoItems } = useKnowledge(
selectedBase.id || ''
)
const [activeKey, setActiveKey] = useState('files')
const [quota, setQuota] = useState<number | undefined>(undefined)
const [progressMap, setProgressMap] = useState<Map<string, number>>(new Map())
@ -69,35 +73,49 @@ const KnowledgeContent: FC<KnowledgeContentProps> = ({ selectedBase }) => {
title: t('files.title'),
icon: activeKey === 'files' ? <Book size={16} color="var(--color-primary)" /> : <Book size={16} />,
items: fileItems,
content: <KnowledgeFiles selectedBase={selectedBase} progressMap={progressMap} preprocessMap={preprocessMap} />
content: <KnowledgeFiles selectedBase={selectedBase} progressMap={progressMap} preprocessMap={preprocessMap} />,
show: true
},
{
key: 'notes',
title: t('knowledge.notes'),
icon: activeKey === 'notes' ? <Notebook size={16} color="var(--color-primary)" /> : <Notebook size={16} />,
items: noteItems,
content: <KnowledgeNotes selectedBase={selectedBase} />
content: <KnowledgeNotes selectedBase={selectedBase} />,
show: true
},
{
key: 'directories',
title: t('knowledge.directories'),
icon: activeKey === 'directories' ? <Folder size={16} color="var(--color-primary)" /> : <Folder size={16} />,
items: directoryItems,
content: <KnowledgeDirectories selectedBase={selectedBase} progressMap={progressMap} />
content: <KnowledgeDirectories selectedBase={selectedBase} progressMap={progressMap} />,
show: true
},
{
key: 'urls',
title: t('knowledge.urls'),
icon: activeKey === 'urls' ? <Link size={16} color="var(--color-primary)" /> : <Link size={16} />,
items: urlItems,
content: <KnowledgeUrls selectedBase={selectedBase} />
content: <KnowledgeUrls selectedBase={selectedBase} />,
show: true
},
{
key: 'sitemaps',
title: t('knowledge.sitemaps'),
icon: activeKey === 'sitemaps' ? <Globe size={16} color="var(--color-primary)" /> : <Globe size={16} />,
items: sitemapItems,
content: <KnowledgeSitemaps selectedBase={selectedBase} />
content: <KnowledgeSitemaps selectedBase={selectedBase} />,
show: true
},
{
key: 'videos',
title: t('knowledge.videos'),
icon: activeKey === 'videos' ? <Video size={16} color="var(--color-primary)" /> : <Video size={16} />,
items: videoItems,
content: <KnowledgeVideos selectedBase={selectedBase} />,
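// Video ingestion is only available on the LangChain-based knowledge framework.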
show: base?.framework === 'langchain'
}
]
@ -105,19 +123,21 @@ const KnowledgeContent: FC<KnowledgeContentProps> = ({ selectedBase }) => {
return null
}
const tabItems = knowledgeItems.map((item) => ({
key: item.key,
label: (
<TabLabel>
{item.icon}
<span>{item.title}</span>
<CustomTag size={10} color={item.items.length > 0 ? '#00b96b' : '#cccccc'}>
{item.items.length}
</CustomTag>
</TabLabel>
),
children: <TabContent>{item.content}</TabContent>
}))
const tabItems = knowledgeItems
.filter((item) => item.show)
.map((item) => ({
key: item.key,
label: (
<TabLabel>
{item.icon}
<span>{item.title}</span>
<CustomTag size={10} color={item.items.length > 0 ? '#00b96b' : '#cccccc'}>
{item.items.length}
</CustomTag>
</TabLabel>
),
children: <TabContent>{item.content}</TabContent>
}))
return (
<MainContainer>
@ -142,6 +162,7 @@ const KnowledgeContent: FC<KnowledgeContentProps> = ({ selectedBase }) => {
{base.preprocessProvider && base.preprocessProvider.type === 'preprocess' && (
<QuotaTag base={base} providerId={base.preprocessProvider?.provider.id} quota={quota} />
)}
{base.framework !== 'langchain' && <MigrationInfoTag base={base} />}
</div>
</ModelInfo>
<HStack gap={8} alignItems="center">
@ -322,4 +343,12 @@ export const FlexAlignCenter = styled.div`
justify-content: center;
`
export const ResponsiveButton = styled(Button)`
@media (max-width: 1080px) {
.ant-btn-icon + span {
display: none;
}
}
`
export default KnowledgeContent

View File

@ -78,6 +78,7 @@ function createKnowledgeBase(overrides: Partial<KnowledgeBase> = {}): KnowledgeB
chunkSize: 500,
chunkOverlap: 200,
threshold: 0.5,
framework: 'langchain',
...overrides
}
}

View File

@ -137,18 +137,43 @@ vi.mock('antd', () => ({
{children}
</select>
),
Slider: ({ value, onChange, min, max, step, marks }: any) => (
<input
data-testid="document-count-slider"
type="range"
value={value}
onChange={(e) => onChange?.(Number(e.target.value))}
min={min}
max={max}
step={step}
data-marks={JSON.stringify(marks)}
/>
)
Segmented: ({ value, onChange, options, style }: any) => (
<div data-testid="retriever-segmented" style={style}>
{options?.map((option: any) => (
<button
key={option.value}
type="button"
data-testid={`segmented-option-${option.value}`}
onClick={() => onChange?.(option.value)}
data-active={value === option.value}
style={{
backgroundColor: value === option.value ? '#1677ff' : '#fff',
color: value === option.value ? '#fff' : '#000'
}}>
{option.label}
</button>
))}
</div>
),
Slider: ({ value, onChange, min, max, step, marks, style }: any) => {
// Determine test ID based on slider characteristics
const isWeightSlider = min === 0 && max === 1 && step === 0.1
const testId = isWeightSlider ? 'weight-slider' : 'document-count-slider'
return (
<input
data-testid={testId}
type="range"
value={value}
onChange={(e) => onChange?.(Number(e.target.value))}
min={min}
max={max}
step={step}
style={style}
data-marks={JSON.stringify(marks)}
/>
)
}
}))
/**
@ -168,10 +193,14 @@ function createKnowledgeBase(overrides: Partial<KnowledgeBase> = {}): KnowledgeB
id: 'test-base-id',
name: 'Test Knowledge Base',
model: defaultModel,
retriever: {
mode: 'hybrid'
},
items: [],
created_at: Date.now(),
updated_at: Date.now(),
version: 1,
framework: 'langchain',
...overrides
}
}
@ -290,6 +319,42 @@ describe('GeneralSettingsPanel', () => {
expect(mockSetNewBase).toHaveBeenCalledWith(expect.any(Function))
})
it('should handle hybrid weight change', async () => {
renderComponent()
const weightSlider = screen.getByTestId('weight-slider')
fireEvent.change(weightSlider, { target: { value: '0.7' } })
expect(mockSetNewBase).toHaveBeenCalledWith({
...mockBase,
retriever: {
...mockBase.retriever,
mode: 'hybrid',
weight: 0.7
}
})
})
it('should handle retriever selection change', async () => {
renderComponent()
// Test clicking on hybrid retriever option
const hybridOption = screen.getByTestId('segmented-option-hybrid')
await user.click(hybridOption)
expect(mockSetNewBase).toHaveBeenCalledWith({
...mockBase,
retriever: { mode: 'hybrid' }
})
})
it('should not render retriever segmented when framework is embedjs', () => {
const baseWithEmbedjs = createKnowledgeBase({ framework: 'embedjs' })
renderComponent({ newBase: baseWithEmbedjs })
expect(screen.queryByTestId('retriever-segmented')).not.toBeInTheDocument()
})
it('should disable dimension input when no model is selected', () => {
const baseWithoutModel = createKnowledgeBase({ model: undefined as any })
renderComponent({ newBase: baseWithoutModel })

View File

@ -34,6 +34,43 @@ exports[`GeneralSettingsPanel > basic rendering > should match snapshot 1`] = `
value="Test Knowledge Base"
/>
</div>
<div
class="c1"
>
<div
class="settings-label"
>
settings.tool.preprocess.title
<span
data-placement="right"
data-testid="info-tooltip"
title="settings.tool.preprocess.tooltip"
>
</span>
</div>
<select
data-allow-clear="true"
data-placeholder="settings.tool.preprocess.provider_placeholder"
data-testid="preprocess-select"
>
<option
value=""
>
Select option
</option>
<option
value="doc2x"
>
Doc2X
</option>
<option
value="mistral"
>
Mistral
</option>
</select>
</div>
<div
class="c1"
>
@ -139,36 +176,62 @@ exports[`GeneralSettingsPanel > basic rendering > should match snapshot 1`] = `
<div
class="settings-label"
>
settings.tool.preprocess.title
knowledge.retriever
<span
data-placement="right"
data-testid="info-tooltip"
title="settings.tool.preprocess.tooltip"
title="knowledge.retriever_tooltip"
>
</span>
</div>
<select
data-allow-clear="true"
data-placeholder="settings.tool.preprocess.provider_placeholder"
data-testid="preprocess-select"
<div
data-testid="retriever-segmented"
>
<option
value=""
<button
data-active="true"
data-testid="segmented-option-hybrid"
style="background-color: rgb(22, 119, 255); color: rgb(255, 255, 255);"
type="button"
>
Select option
</option>
<option
value="doc2x"
knowledge.retriever_hybrid
</button>
<button
data-active="false"
data-testid="segmented-option-vector"
style="background-color: rgb(255, 255, 255); color: rgb(0, 0, 0);"
type="button"
>
Doc2X
</option>
<option
value="mistral"
knowledge.retriever_vector
</button>
<button
data-active="false"
data-testid="segmented-option-bm25"
style="background-color: rgb(255, 255, 255); color: rgb(0, 0, 0);"
type="button"
>
Mistral
</option>
</select>
knowledge.retriever_bm25
</button>
</div>
</div>
<div
class="c1"
>
<div
class="settings-label"
>
knowledge.retriever_hybrid_weight.title
</div>
<input
data-marks="{"0":"knowledge.retriever_hybrid_weight.bm25","1":"knowledge.retriever_hybrid_weight.vector","0.5":"knowledge.retriever_hybrid_weight.recommended"}"
data-testid="weight-slider"
max="1"
min="0"
step="0.1"
style="width: 100%;"
type="range"
value="0.5"
/>
</div>
<div
class="c1"
@ -191,6 +254,7 @@ exports[`GeneralSettingsPanel > basic rendering > should match snapshot 1`] = `
max="50"
min="1"
step="1"
style="width: 100%;"
type="range"
value="6"
/>

View File

@ -65,7 +65,7 @@ exports[`KnowledgeBaseFormModal > basic rendering > should match snapshot 1`] =
data-title="Knowledge Base Settings"
styles="[object Object]"
transitionname="animation-move-down"
width="min(800px, 70vw)"
width="min(900px, 75vw)"
>
<div
data-testid="modal-header"

View File

@ -3,6 +3,7 @@ import { TopView } from '@renderer/components/TopView'
import { useKnowledgeBases } from '@renderer/hooks/useKnowledge'
import { useKnowledgeBaseForm } from '@renderer/hooks/useKnowledgeBaseForm'
import { getKnowledgeBaseParams } from '@renderer/services/KnowledgeService'
import { KnowledgeBase } from '@renderer/types'
import { formatErrorMessage } from '@renderer/utils/error'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
@ -47,10 +48,11 @@ const PopupContainer: React.FC<PopupContainerProps> = ({ title, resolve }) => {
}
try {
const _newBase = {
const _newBase: KnowledgeBase = {
...newBase,
created_at: Date.now(),
updated_at: Date.now()
updated_at: Date.now(),
framework: 'langchain'
}
await window.api.knowledgeBase.create(getKnowledgeBaseParams(_newBase))

View File

@ -4,7 +4,7 @@ import { TopView } from '@renderer/components/TopView'
import { useKnowledge } from '@renderer/hooks/useKnowledge'
import { useKnowledgeBaseForm } from '@renderer/hooks/useKnowledgeBaseForm'
import { getModelUniqId } from '@renderer/services/ModelService'
import { KnowledgeBase } from '@renderer/types'
import { KnowledgeBase, MigrationModeEnum } from '@renderer/types'
import { formatErrorMessage } from '@renderer/utils/error'
import { Flex } from 'antd'
import { useCallback, useMemo, useState } from 'react'
@ -44,10 +44,11 @@ const PopupContainer: React.FC<PopupContainerProps> = ({ base: _base, resolve })
[base, newBase]
)
const handleMigration = useCallback(async () => {
// Handle migration triggered by a change of embedding model
const handleEmbeddingModelChangeMigration = useCallback(async () => {
const migratedBase = { ...newBase, id: nanoid() }
try {
await migrateBase(migratedBase)
await migrateBase(migratedBase, MigrationModeEnum.EmbeddingModelChange)
setOpen(false)
resolve(migratedBase)
} catch (error) {
@ -83,7 +84,7 @@ const PopupContainer: React.FC<PopupContainerProps> = ({ base: _base, resolve })
),
okText: t('knowledge.migrate.confirm.ok'),
centered: true,
onOk: handleMigration
onOk: handleEmbeddingModelChangeMigration
})
} else {
try {

View File

@ -0,0 +1,31 @@
import { FileMetadata, KnowledgeSearchResult } from '@renderer/types'
import { Typography } from 'antd'
import React, { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { CopyButtonContainer, KnowledgeItemMetadata } from './components'
import { useHighlightText } from './hooks'
const { Paragraph } = Typography
interface Props {
item: KnowledgeSearchResult & {
file: FileMetadata | null
}
searchKeyword: string
}
const TextItem: FC<Props> = ({ item, searchKeyword }) => {
const { t } = useTranslation()
const { highlightText } = useHighlightText()
return (
<>
<KnowledgeItemMetadata item={item} />
<CopyButtonContainer textToCopy={item.pageContent} tooltipTitle={t('common.copy')} />
<Paragraph style={{ userSelect: 'text', marginBottom: 0 }}>
{highlightText(item.pageContent, searchKeyword)}
</Paragraph>
</>
)
}
export default React.memo(TextItem)

View File

@ -0,0 +1,138 @@
import { FileMetadata, KnowledgeSearchResult } from '@renderer/types'
import { Typography } from 'antd'
import React, { FC, useRef } from 'react'
import ReactPlayer from 'react-player'
import YouTube, { YouTubeProps } from 'react-youtube'
import styled from 'styled-components'
const { Paragraph } = Typography
import { loggerService } from '@logger'
import { useTranslation } from 'react-i18next'
import { CopyButtonContainer, KnowledgeItemMetadata } from './components'
import { useHighlightText } from './hooks'
interface Props {
item: KnowledgeSearchResult & {
file: FileMetadata | null
}
searchKeyword: string
}
const logger = loggerService.withContext('KnowledgeSearchPopup VideoItem')
const VideoItem: FC<Props> = ({ item, searchKeyword }) => {
const { t } = useTranslation()
const playerRef = useRef<HTMLVideoElement | null>(null)
const { highlightText } = useHighlightText()
/**
* YouTube
*/
const renderYoutube = () => {
if (!item.metadata.source) {
logger.warn('YouTube video was requested but metadata.source is missing.')
return <ErrorContainer>{t('knowledge.error.video.youtube_url_missing')}</ErrorContainer>
}
const onPlayerReady: YouTubeProps['onReady'] = (event) => {
event.target.pauseVideo()
}
const opts: YouTubeProps['opts'] = {
height: '100%',
width: '100%',
playerVars: {
start: Math.floor(item.metadata?.startTime ?? 0)
}
}
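// metadata.source is assumed to hold the YouTube video ID that react-youtube expects as videoId.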
return (
<YouTube
style={{ height: '100%', width: '100%' }}
videoId={item.metadata.source}
opts={opts}
onReady={onPlayerReady}
/>
)
}
/**
* Local video file
*/
const renderLocalVideo = () => {
if (!item.metadata?.video?.path) {
logger.warn('Local video was requested but the video path is missing.')
return <ErrorContainer>{t('knowledge.error.video.local_file_missing')}</ErrorContainer>
}
const videoSrc = `file://${item.metadata?.video?.path}`
const handleReady = () => {
const startTime = Math.floor(item.metadata?.startTime ?? 0)
if (playerRef.current) {
playerRef.current.currentTime = startTime
}
}
return (
<ReactPlayer
ref={playerRef}
style={{
height: '100%',
width: '100%'
}}
src={videoSrc}
controls
onReady={handleReady}
/>
)
}
const renderVideo = () => {
switch (item.metadata?.type) {
case 'youtube':
return renderYoutube()
case 'video':
return renderLocalVideo()
default:
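// Unknown type: fall back to the text-only result above without a player.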
return
}
}
return (
<>
<KnowledgeItemMetadata item={item} />
<CopyButtonContainer textToCopy={item.pageContent} tooltipTitle={t('common.copy')} />
<Paragraph style={{ userSelect: 'text', marginBottom: 0 }}>
{highlightText(item.pageContent, searchKeyword)}
</Paragraph>
<VideoContainer>{renderVideo()}</VideoContainer>
</>
)
}
export default React.memo(VideoItem)
const VideoContainer = styled.div`
width: 100%;
aspect-ratio: 16 / 9;
height: auto;
background-color: #000;
margin-top: 8px;
border-radius: 8px;
overflow: hidden;
`
const ErrorContainer = styled.div`
display: flex;
align-items: center;
justify-content: center;
height: 100%;
color: #999;
font-size: 14px;
`

View File

@ -0,0 +1,54 @@
import { CopyOutlined } from '@ant-design/icons'
import { FileMetadata, KnowledgeSearchResult } from '@renderer/types'
import { Tooltip, Typography } from 'antd'
import React from 'react'
import { useTranslation } from 'react-i18next'
import { CopyButton, MetadataContainer, ScoreTag, TagContainer } from '.'
import { useCopyText, useKnowledgeItemMetadata } from './hooks'
const { Text } = Typography
interface KnowledgeItemMetadataProps {
item: KnowledgeSearchResult & {
file: FileMetadata | null
}
}
export const KnowledgeItemMetadata: React.FC<KnowledgeItemMetadataProps> = ({ item }) => {
const { getSourceLink } = useKnowledgeItemMetadata()
const { t } = useTranslation()
const sourceLink = getSourceLink(item)
return (
<MetadataContainer>
<Text type="secondary">
{t('knowledge.source')}:{' '}
<a href={sourceLink.href} target="_blank" rel="noreferrer">
{sourceLink.text}
</a>
</Text>
{item.score !== 0 && <ScoreTag>Score: {(item.score * 100).toFixed(1)}%</ScoreTag>}
</MetadataContainer>
)
}
interface CopyButtonContainerProps {
textToCopy: string
tooltipTitle?: string
}
export const CopyButtonContainer: React.FC<CopyButtonContainerProps> = ({ textToCopy, tooltipTitle = 'Copy' }) => {
const { handleCopy } = useCopyText()
return (
<TagContainer>
<Tooltip title={tooltipTitle}>
<CopyButton onClick={() => handleCopy(textToCopy)}>
<CopyOutlined />
</CopyButton>
</Tooltip>
</TagContainer>
)
}

View File

@ -0,0 +1,72 @@
import { loggerService } from '@logger'
import { isValidUrl } from '@renderer/utils/fetch'
import { message } from 'antd'
import React, { ReactElement } from 'react'
import { useTranslation } from 'react-i18next'
const logger = loggerService.withContext('KnowledgeSearchItem hooks')
/**
* Highlight occurrences of the search keyword within result text
*/
export const useHighlightText = () => {
const highlightText = (text: string, searchKeyword: string): (string | ReactElement)[] => {
if (!searchKeyword) return [text]
// Escape special characters in the search keyword
const escapedKeyword = searchKeyword.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
const parts = text.split(new RegExp(`(${escapedKeyword})`, 'gi'))
return parts.map((part, i) =>
part.toLowerCase() === searchKeyword.toLowerCase() ? React.createElement('mark', { key: i }, part) : part
)
}
return { highlightText }
}
/**
* Copy text to the clipboard and surface success/failure feedback
*/
export const useCopyText = () => {
const { t } = useTranslation()
const handleCopy = async (text: string) => {
try {
await navigator.clipboard.writeText(text)
message.success(t('message.copied'))
} catch (error) {
logger.error('Failed to copy text:', error as Error)
window.message.error(t('message.error.copy') || 'Failed to copy text')
}
}
return { handleCopy }
}
/**
* Resolve the source link (file, URL, or local path) for a knowledge search result
*/
export const useKnowledgeItemMetadata = () => {
const getSourceLink = (item: { file: any; metadata: any }) => {
if (item.file) {
return {
href: `http://file/${item.file.name}`,
text: item.file.origin_name
}
} else if (isValidUrl(item.metadata.source)) {
return {
href: item.metadata.source,
text: item.metadata.source
}
} else {
// Handle the source path of a preprocessed file
return {
href: `file://${item.metadata.source}`,
text: item.metadata.source.split('/').pop() || item.metadata.source
}
}
}
return { getSourceLink }
}

View File

@ -0,0 +1,93 @@
import { FileMetadata, KnowledgeSearchResult } from '@renderer/types'
import React from 'react'
import styled from 'styled-components'
import TextItem from './TextItem'
import VideoItem from './VideoItem'
// Export shared components
export { CopyButtonContainer, KnowledgeItemMetadata } from './components'
export { useCopyText, useHighlightText, useKnowledgeItemMetadata } from './hooks'
interface Props {
item: KnowledgeSearchResult & {
file: FileMetadata | null
}
searchKeyword: string
}
const SearchItemRenderer: React.FC<Props> = ({ item, searchKeyword }) => {
const renderItem = () => {
if (item.metadata.type === 'video') {
return <VideoItem item={item} searchKeyword={searchKeyword} />
} else {
return <TextItem item={item} searchKeyword={searchKeyword} />
}
}
return <ResultItem>{renderItem()}</ResultItem>
}
export default React.memo(SearchItemRenderer)
export const TagContainer = styled.div`
position: absolute;
top: 58px;
right: 16px;
display: flex;
align-items: center;
gap: 8px;
opacity: 0;
transition: opacity 0.2s;
`
const ResultItem = styled.div`
width: 100%;
position: relative;
padding: 16px;
background: var(--color-background-soft);
border-radius: 8px;
&:hover {
${TagContainer} {
opacity: 1 !important;
}
}
`
export const ScoreTag = styled.div`
padding: 2px 8px;
background: var(--color-primary);
color: white;
border-radius: 4px;
font-size: 12px;
flex-shrink: 0;
`
export const CopyButton = styled.div`
display: flex;
align-items: center;
justify-content: center;
width: 24px;
height: 24px;
background: var(--color-background-mute);
color: var(--color-text);
border-radius: 4px;
cursor: pointer;
transition: all 0.2s;
&:hover {
background: var(--color-primary);
color: white;
}
`
export const MetadataContainer = styled.div`
display: flex;
justify-content: space-between;
align-items: center;
gap: 16px;
margin-bottom: 8px;
padding-bottom: 8px;
border-bottom: 1px solid var(--color-border);
user-select: text;
`

View File

@ -1,17 +1,15 @@
import { CopyOutlined } from '@ant-design/icons'
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { loggerService } from '@logger'
import { HStack } from '@renderer/components/Layout'
import { TopView } from '@renderer/components/TopView'
import { searchKnowledgeBase } from '@renderer/services/KnowledgeService'
import { FileMetadata, KnowledgeBase } from '@renderer/types'
import { Divider, Input, InputRef, List, message, Modal, Spin, Tooltip, Typography } from 'antd'
import { FileMetadata, KnowledgeBase, KnowledgeSearchResult } from '@renderer/types'
import { Divider, Input, InputRef, List, Modal, Spin } from 'antd'
import { Search } from 'lucide-react'
import { useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
const { Text, Paragraph } = Typography
import SearchItemRenderer from './KnowledgeSearchItem'
interface ShowParams {
base: KnowledgeBase
@ -26,7 +24,7 @@ const logger = loggerService.withContext('KnowledgeSearchPopup')
const PopupContainer: React.FC<Props> = ({ base, resolve }) => {
const [open, setOpen] = useState(true)
const [loading, setLoading] = useState(false)
const [results, setResults] = useState<Array<ExtractChunkData & { file: FileMetadata | null }>>([])
const [results, setResults] = useState<Array<KnowledgeSearchResult & { file: FileMetadata | null }>>([])
const [searchKeyword, setSearchKeyword] = useState('')
const { t } = useTranslation()
const searchInputRef = useRef<InputRef>(null)
@ -42,6 +40,7 @@ const PopupContainer: React.FC<Props> = ({ base, resolve }) => {
setLoading(true)
try {
const searchResults = await searchKnowledgeBase(value, base)
logger.debug(`KnowledgeSearchPopup search results: ${JSON.stringify(searchResults)}`)
setResults(searchResults)
} catch (error) {
logger.error(`Failed to search knowledge base ${base.name}:`, error as Error)
@ -65,28 +64,6 @@ const PopupContainer: React.FC<Props> = ({ base, resolve }) => {
KnowledgeSearchPopup.hide = onCancel
const highlightText = (text: string) => {
if (!searchKeyword) return text
// Escape special characters in the search keyword
const escapedKeyword = searchKeyword.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
const parts = text.split(new RegExp(`(${escapedKeyword})`, 'gi'))
return parts.map((part, i) =>
part.toLowerCase() === searchKeyword.toLowerCase() ? <mark key={i}>{part}</mark> : part
)
}
const handleCopy = async (text: string) => {
try {
await navigator.clipboard.writeText(text)
message.success(t('message.copied'))
} catch (error) {
logger.error('Failed to copy text:', error as Error)
window.message.error(t('message.error.copy') || 'Failed to copy text')
}
}
useEffect(() => {
if (searchInputRef.current) {
searchInputRef.current.focus()
@ -150,38 +127,7 @@ const PopupContainer: React.FC<Props> = ({ base, resolve }) => {
dataSource={results}
renderItem={(item) => (
<List.Item>
<ResultItem>
<MetadataContainer>
<Text type="secondary">
{t('knowledge.source')}:{' '}
{item.file ? (
<a href={`http://file/${item.file.name}`} target="_blank" rel="noreferrer">
{item.file.origin_name}
</a>
) : item.metadata.type !== 'LocalPathLoader' ? (
<a href={item.metadata.source} target="_blank" rel="noreferrer">
{item.metadata.source}
</a>
) : (
// Handle the source path of a preprocessed file
<a href={`file://${item.metadata.source}`} target="_blank" rel="noreferrer">
{item.metadata.source.split('/').pop() || item.metadata.source}
</a>
)}
</Text>
<ScoreTag>Score: {(item.score * 100).toFixed(1)}%</ScoreTag>
</MetadataContainer>
<TagContainer>
<Tooltip title={t('common.copy')}>
<CopyButton onClick={() => handleCopy(item.pageContent)}>
<CopyOutlined />
</CopyButton>
</Tooltip>
</TagContainer>
<Paragraph style={{ userSelect: 'text', marginBottom: 0 }}>
{highlightText(item.pageContent)}
</Paragraph>
</ResultItem>
<SearchItemRenderer item={item} searchKeyword={searchKeyword} />
</List.Item>
)}
/>
@ -204,69 +150,6 @@ const LoadingContainer = styled.div`
height: 200px;
`
const TagContainer = styled.div`
position: absolute;
top: 58px;
right: 16px;
display: flex;
align-items: center;
gap: 8px;
opacity: 0;
transition: opacity 0.2s;
`
const ResultItem = styled.div`
width: 100%;
position: relative;
padding: 16px;
background: var(--color-background-soft);
border-radius: 8px;
&:hover {
${TagContainer} {
opacity: 1 !important;
}
}
`
const ScoreTag = styled.div`
padding: 2px 8px;
background: var(--color-primary);
color: white;
border-radius: 4px;
font-size: 12px;
flex-shrink: 0;
`
const CopyButton = styled.div`
display: flex;
align-items: center;
justify-content: center;
width: 24px;
height: 24px;
background: var(--color-background-mute);
color: var(--color-text);
border-radius: 4px;
cursor: pointer;
transition: all 0.2s;
&:hover {
background: var(--color-primary);
color: white;
}
`
const MetadataContainer = styled.div`
display: flex;
justify-content: space-between;
align-items: center;
gap: 16px;
margin-bottom: 8px;
padding-bottom: 8px;
border-bottom: 1px solid var(--color-border);
user-select: text;
`
const SearchIcon = styled.div`
width: 32px;
height: 32px;

View File

@ -6,7 +6,7 @@ import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
import { useProviders } from '@renderer/hooks/useProvider'
import { getModelUniqId } from '@renderer/services/ModelService'
import { KnowledgeBase, PreprocessProvider } from '@renderer/types'
import { Input, Select, SelectProps, Slider } from 'antd'
import { Input, Segmented, Select, SelectProps, Slider } from 'antd'
import { useTranslation } from 'react-i18next'
import { SettingsItem, SettingsPanel } from './styles'
@ -47,6 +47,21 @@ const GeneralSettingsPanel: React.FC<GeneralSettingsPanelProps> = ({
/>
</SettingsItem>
<SettingsItem>
<div className="settings-label">
{t('settings.tool.preprocess.title')}
<InfoTooltip title={t('settings.tool.preprocess.tooltip')} placement="right" />
</div>
<Select
value={selectedDocPreprocessProvider?.id}
style={{ width: '100%' }}
onChange={handleDocPreprocessChange}
placeholder={t('settings.tool.preprocess.provider_placeholder')}
options={docPreprocessSelectOptions}
allowClear
/>
</SettingsItem>
<SettingsItem>
<div className="settings-label">
{t('models.embedding_model')}
@ -91,20 +106,54 @@ const GeneralSettingsPanel: React.FC<GeneralSettingsPanelProps> = ({
/>
</SettingsItem>
<SettingsItem>
<div className="settings-label">
{t('settings.tool.preprocess.title')}
<InfoTooltip title={t('settings.tool.preprocess.tooltip')} placement="right" />
</div>
<Select
value={selectedDocPreprocessProvider?.id}
style={{ width: '100%' }}
onChange={handleDocPreprocessChange}
placeholder={t('settings.tool.preprocess.provider_placeholder')}
options={docPreprocessSelectOptions}
allowClear
/>
</SettingsItem>
{newBase.framework !== 'embedjs' && (
<>
<SettingsItem>
<div className="settings-label">
{t('knowledge.retriever')}
<InfoTooltip title={t('knowledge.retriever_tooltip')} placement="right" />
</div>
<Segmented
value={newBase.retriever?.mode || 'hybrid'}
onChange={(value) =>
setNewBase({ ...newBase, retriever: { mode: value as 'vector' | 'bm25' | 'hybrid' } })
}
options={[
{ label: t('knowledge.retriever_hybrid'), value: 'hybrid' },
{ label: t('knowledge.retriever_vector'), value: 'vector' },
{ label: t('knowledge.retriever_bm25'), value: 'bm25' }
]}
/>
</SettingsItem>
{newBase.retriever?.mode === 'hybrid' && (
<SettingsItem>
<div className="settings-label">{t('knowledge.retriever_hybrid_weight.title')}</div>
<Slider
style={{ width: '100%' }}
min={0}
max={1}
step={0.1}
value={newBase.retriever?.weight || 0.5}
marks={{
0: t('knowledge.retriever_hybrid_weight.bm25'),
0.5: t('knowledge.retriever_hybrid_weight.recommended'),
1: t('knowledge.retriever_hybrid_weight.vector')
}}
onChange={(value) =>
setNewBase({
...newBase,
retriever: {
...newBase.retriever,
mode: 'hybrid',
weight: value
}
})
}
/>
</SettingsItem>
)}
</>
)}
<SettingsItem>
<div className="settings-label">

View File

@ -25,9 +25,9 @@ const KnowledgeBaseFormModal: React.FC<KnowledgeBaseFormModalProps> = ({ panels,
maskClosable={false}
centered
transitionName="animation-move-down"
width="min(800px, 70vw)"
width="min(900px, 75vw)"
styles={{
body: { padding: 0, height: 550 },
body: { padding: 0, height: 700 },
header: {
padding: '10px 15px',
borderBottom: '0.5px solid var(--color-border)',

View File

@ -0,0 +1,57 @@
import { loggerService } from '@logger'
import { nanoid } from '@reduxjs/toolkit'
import { useKnowledge } from '@renderer/hooks/useKnowledge'
import { useKnowledgeBaseForm } from '@renderer/hooks/useKnowledgeBaseForm'
import { KnowledgeBase, MigrationModeEnum } from '@renderer/types'
import { formatErrorMessage } from '@renderer/utils/error'
import { Flex, Tag } from 'antd'
import { FC, useCallback } from 'react'
import { useTranslation } from 'react-i18next'
const logger = loggerService.withContext('MigrationInfoTag')
const MigrationInfoTag: FC<{ base: KnowledgeBase }> = ({ base: _base }) => {
const { t } = useTranslation()
const { migrateBase } = useKnowledge(_base.id)
const { newBase } = useKnowledgeBaseForm(_base)
// Handle migration of this base to the LangChain framework
const handleMigration = useCallback(async () => {
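// A fresh id keeps the migrated copy separate from the original base, matching the "creates a copy" behaviour described in the confirm dialog.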
const migratedBase = { ...newBase, id: nanoid() }
try {
await migrateBase(migratedBase, MigrationModeEnum.MigrationToLangChain)
} catch (error) {
logger.error('KnowledgeBase migration failed:', error as Error)
window.message.error(t('knowledge.migrate.error.failed') + ': ' + formatErrorMessage(error))
}
}, [newBase, migrateBase, t])
const onClick = async () => {
window.modal.confirm({
title: t('knowledge.migrate.confirm.title'),
content: (
<Flex vertical align="self-start">
<span>{t('knowledge.migrate.migrate_to_langchain.content')}</span>
</Flex>
),
okText: t('knowledge.migrate.confirm.ok'),
centered: true,
onOk: handleMigration
})
}
return (
<Tag
color="blue"
style={{
borderRadius: 20,
margin: 0,
cursor: 'pointer'
}}
onClick={onClick}>
{t('knowledge.migrate.migrate_to_langchain.info')}
</Tag>
)
}
export default MigrationInfoTag

View File

@ -31,10 +31,18 @@ const QuotaTag: FC<{ base: KnowledgeBase; providerId: PreprocessProviderId; quot
const userId = getStoreSetting('userId')
const baseParams = getKnowledgeBaseParams(base)
try {
const response = await window.api.knowledgeBase.checkQuota({
base: baseParams,
userId: userId as string
})
let response: number
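// Both branches currently issue the same checkQuota IPC call; the split is presumably a placeholder for framework-specific handling later.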
if (base.framework === 'langchain') {
response = await window.api.knowledgeBase.checkQuota({
base: baseParams,
userId: userId as string
})
} else {
response = await window.api.knowledgeBase.checkQuota({
base: baseParams,
userId: userId as string
})
}
setQuota(response)
} catch (error) {
logger.error('[KnowledgeContent] Error checking quota:', error as Error)

View File

@ -21,6 +21,7 @@ import {
ItemHeader,
KnowledgeEmptyView,
RefreshIcon,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
@ -66,7 +67,7 @@ const KnowledgeDirectories: FC<KnowledgeContentProps> = ({ selectedBase, progres
return (
<ItemContainer>
<ItemHeader>
<Button
<ResponsiveButton
type="primary"
icon={<PlusIcon size={16} />}
onClick={(e) => {
@ -75,7 +76,7 @@ const KnowledgeDirectories: FC<KnowledgeContentProps> = ({ selectedBase, progres
}}
disabled={disabled}>
{t('knowledge.add_directory')}
</Button>
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
{directoryItems.length === 0 && <KnowledgeEmptyView />}

View File

@ -6,7 +6,7 @@ import FileItem from '@renderer/pages/files/FileItem'
import StatusIcon from '@renderer/pages/knowledge/components/StatusIcon'
import FileManager from '@renderer/services/FileManager'
import { getProviderName } from '@renderer/services/ProviderService'
import { FileMetadata, FileType, FileTypes, KnowledgeBase, KnowledgeItem } from '@renderer/types'
import { FileMetadata, FileTypes, isKnowledgeFileItem, KnowledgeBase, KnowledgeItem } from '@renderer/types'
import { formatFileSize, uuid } from '@renderer/utils'
import { bookExts, documentExts, textExts, thirdPartyApplicationExts } from '@shared/config/constant'
import { Button, Tooltip, Upload } from 'antd'
@ -28,6 +28,7 @@ import {
ItemHeader,
KnowledgeEmptyView,
RefreshIcon,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
@ -138,7 +139,7 @@ const KnowledgeFiles: FC<KnowledgeContentProps> = ({ selectedBase, progressMap,
return (
<ItemContainer>
<ItemHeader>
<Button
<ResponsiveButton
type="primary"
icon={<PlusIcon size={16} />}
onClick={(e) => {
@ -147,7 +148,7 @@ const KnowledgeFiles: FC<KnowledgeContentProps> = ({ selectedBase, progressMap,
}}
disabled={disabled}>
{t('knowledge.add_file')}
</Button>
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
@ -178,7 +179,10 @@ const KnowledgeFiles: FC<KnowledgeContentProps> = ({ selectedBase, progressMap,
scrollerStyle={{ height: windowHeight - 270 }}
autoHideScrollbar>
{(item) => {
const file = item.content as FileType
if (!isKnowledgeFileItem(item)) {
return null
}
const file = item.content
return (
<div style={{ height: '75px', paddingTop: '12px' }}>
<FileItem

View File

@ -14,7 +14,14 @@ import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import StatusIcon from '../components/StatusIcon'
import { FlexAlignCenter, ItemContainer, ItemHeader, KnowledgeEmptyView, StatusIconWrapper } from '../KnowledgeContent'
import {
FlexAlignCenter,
ItemContainer,
ItemHeader,
KnowledgeEmptyView,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
interface KnowledgeContentProps {
selectedBase: KnowledgeBase
@ -73,7 +80,7 @@ const KnowledgeNotes: FC<KnowledgeContentProps> = ({ selectedBase }) => {
return (
<ItemContainer>
<ItemHeader>
<Button
<ResponsiveButton
type="primary"
icon={<PlusIcon size={16} />}
onClick={(e) => {
@ -82,7 +89,7 @@ const KnowledgeNotes: FC<KnowledgeContentProps> = ({ selectedBase }) => {
}}
disabled={disabled}>
{t('knowledge.add_note')}
</Button>
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
{noteItems.length === 0 && <KnowledgeEmptyView />}

View File

@ -22,6 +22,7 @@ import {
ItemHeader,
KnowledgeEmptyView,
RefreshIcon,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
@ -85,7 +86,7 @@ const KnowledgeSitemaps: FC<KnowledgeContentProps> = ({ selectedBase }) => {
return (
<ItemContainer>
<ItemHeader>
<Button
<ResponsiveButton
type="primary"
icon={<PlusIcon size={16} />}
onClick={(e) => {
@ -94,7 +95,7 @@ const KnowledgeSitemaps: FC<KnowledgeContentProps> = ({ selectedBase }) => {
}}
disabled={disabled}>
{t('knowledge.add_sitemap')}
</Button>
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
{sitemapItems.length === 0 && <KnowledgeEmptyView />}

View File

@ -21,6 +21,7 @@ import {
ItemHeader,
KnowledgeEmptyView,
RefreshIcon,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
@ -113,7 +114,7 @@ const KnowledgeUrls: FC<KnowledgeContentProps> = ({ selectedBase }) => {
return (
<ItemContainer>
<ItemHeader>
<Button
<ResponsiveButton
type="primary"
icon={<PlusIcon size={16} />}
onClick={(e) => {
@ -122,7 +123,7 @@ const KnowledgeUrls: FC<KnowledgeContentProps> = ({ selectedBase }) => {
}}
disabled={disabled}>
{t('knowledge.add_url')}
</Button>
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
{urlItems.length === 0 && <KnowledgeEmptyView />}

View File

@ -0,0 +1,174 @@
import { DeleteOutlined } from '@ant-design/icons'
import { loggerService } from '@logger'
import Ellipsis from '@renderer/components/Ellipsis'
import VideoPopup from '@renderer/components/Popups/VideoPopup'
import Scrollbar from '@renderer/components/Scrollbar'
import { useKnowledge } from '@renderer/hooks/useKnowledge'
import { getProviderName } from '@renderer/services/ProviderService'
import { FileTypes, isKnowledgeVideoItem, KnowledgeBase, KnowledgeItem } from '@renderer/types'
import { Button, Tooltip } from 'antd'
import dayjs from 'dayjs'
import { Plus } from 'lucide-react'
import VirtualList from 'rc-virtual-list'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
const logger = loggerService.withContext('KnowledgeVideos')
import FileItem from '@renderer/pages/files/FileItem'
import { formatFileSize } from '@renderer/utils'
import StatusIcon from '../components/StatusIcon'
import {
ClickableSpan,
FlexAlignCenter,
ItemContainer,
ItemHeader,
KnowledgeEmptyView,
RefreshIcon,
ResponsiveButton,
StatusIconWrapper
} from '../KnowledgeContent'
interface KnowledgeContentProps {
selectedBase: KnowledgeBase
}
const getDisplayTime = (item: KnowledgeItem) => {
const timestamp = item.updated_at && item.updated_at > item.created_at ? item.updated_at : item.created_at
return dayjs(timestamp).format('MM-DD HH:mm')
}
const KnowledgeVideos: FC<KnowledgeContentProps> = ({ selectedBase }) => {
const { t } = useTranslation()
const { base, videoItems, refreshItem, removeItem, getProcessingStatus, addVideo } = useKnowledge(
selectedBase.id || ''
)
const [windowHeight, setWindowHeight] = useState(window.innerHeight)
const providerName = getProviderName(base?.model)
const disabled = !base?.version || !providerName
useEffect(() => {
const handleResize = () => {
setWindowHeight(window.innerHeight)
}
window.addEventListener('resize', handleResize)
return () => window.removeEventListener('resize', handleResize)
}, [])
if (!base) {
return null
}
const handleAddVideo = async () => {
if (disabled) {
return
}
const result = await VideoPopup.show({
title: t('knowledge.add_video')
})
if (!result) {
return
}
if (result && result.videoFile && result.srtFile) {
addVideo([result.videoFile, result.srtFile])
}
}
return (
<ItemContainer>
<ItemHeader>
<ResponsiveButton
type="primary"
icon={<Plus size={16} />}
onClick={(e) => {
e.stopPropagation()
handleAddVideo()
}}
disabled={disabled}>
{t('knowledge.add_video')}
</ResponsiveButton>
</ItemHeader>
<ItemFlexColumn>
{videoItems.length === 0 ? (
<KnowledgeEmptyView />
) : (
<VirtualList
data={[...videoItems].reverse()}
height={windowHeight - 270}
itemHeight={75}
itemKey="id"
styles={{
verticalScrollBar: { width: 6 },
verticalScrollBarThumb: { background: 'var(--color-scrollbar-thumb)' }
}}>
{(item) => {
if (!isKnowledgeVideoItem(item)) {
return null
}
const files = item.content
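// A video knowledge item stores both the video file and its subtitle file; pick the video for display.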
const videoFile = files.find((f) => f.type === FileTypes.VIDEO)
if (!videoFile) {
logger.warn('Knowledge item is missing video file data.', { itemId: item.id })
return null
}
return (
<div style={{ height: '75px', paddingTop: '12px' }}>
<FileItem
key={item.id}
fileInfo={{
name: (
<ClickableSpan onClick={() => window.api.file.openFileWithRelativePath(videoFile)}>
<Ellipsis>
<Tooltip title={videoFile.origin_name}>{videoFile.origin_name}</Tooltip>
</Ellipsis>
</ClickableSpan>
),
ext: videoFile.ext,
extra: `${getDisplayTime(item)} · ${formatFileSize(videoFile.size)}`,
actions: (
<FlexAlignCenter>
{item.uniqueId && (
<Button type="text" icon={<RefreshIcon />} onClick={() => refreshItem(item)} />
)}
<StatusIconWrapper>
<StatusIcon
sourceId={item.id}
base={base}
getProcessingStatus={getProcessingStatus}
type="file"
/>
</StatusIconWrapper>
<Button type="text" danger onClick={() => removeItem(item)} icon={<DeleteOutlined />} />
</FlexAlignCenter>
)
}}
/>
</div>
)
}}
</VirtualList>
)}
</ItemFlexColumn>
</ItemContainer>
)
}
const ItemFlexColumn = styled(Scrollbar)`
display: flex;
flex-direction: column;
gap: 10px;
padding: 20px 16px;
height: calc(100vh - 135px);
`
export default KnowledgeVideos

Some files were not shown because too many files have changed in this diff.