Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2026-01-10 07:19:02 +08:00)
refactor(translate): rework language detection to use the generic chat interface instead of a dedicated one

- Remove the dedicated language-detection interface fetchLanguageDetection
- Implement language detection with the existing fetchChatCompletion interface
- Handle the special-case logic for QwenMT models
- Improve the prompt and parameter handling for language detection
parent 4960eb712b
commit 01d7f784f7
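
For orientation, the sketch below condenses the new call path introduced by this commit: a throwaway assistant is configured with LANG_DETECT_PROMPT, handed to the generic fetchChatCompletion, and the detected language code is read back from streamed text chunks. All identifiers are taken from the diff that follows; the helper name detectLanguageSketch and its signature are hypothetical, and the missing-model and QwenMT guards and the language-list building are simplified, so treat this as an illustration rather than the committed code.

import { LANG_DETECT_PROMPT } from '@renderer/config/prompts'
import { fetchChatCompletion } from '@renderer/services/ApiService'
import { getDefaultAssistant, getDefaultModel, getQuickModel } from '@renderer/services/AssistantService'
import type { Assistant } from '@renderer/types'
import { Chunk, ChunkType } from '@renderer/types/chunk'

// Sketch only: detect the language of `text`, constrained to the JSON-encoded
// list of allowed language codes in `listLangText`, via the generic chat API.
async function detectLanguageSketch(text: string, listLangText: string): Promise<string> {
  const assistant: Assistant = getDefaultAssistant()
  assistant.model = getQuickModel() || getDefaultModel()
  assistant.settings = { temperature: 0.7 }
  assistant.prompt = LANG_DETECT_PROMPT.replace('{{list_lang}}', listLangText).replace('{{input}}', text)

  let detectedLang = ''
  const onChunk = (chunk: Chunk) => {
    if (chunk.type === ChunkType.TEXT_DELTA) {
      // Mirrors the committed code: the latest TEXT_DELTA chunk's text wins.
      detectedLang = chunk.text
    }
  }

  await fetchChatCompletion({ prompt: 'follow system prompt', assistant, onChunkReceived: onChunk })
  return detectedLang.trim()
}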

(First changed file: the ApiService module — the '@renderer/services/ApiService' import updated in the second file below points at it.)

@@ -7,8 +7,7 @@ import { CompletionsParams } from '@renderer/aiCore/legacy/middleware/schemas'
 import { AiSdkMiddlewareConfig } from '@renderer/aiCore/middleware/AiSdkMiddlewareBuilder'
 import { buildStreamTextParams } from '@renderer/aiCore/transformParameters'
 import type { StreamTextParams } from '@renderer/aiCore/types'
-import { isDedicatedImageGenerationModel, isEmbeddingModel, isQwenMTModel } from '@renderer/config/models'
-import { LANG_DETECT_PROMPT } from '@renderer/config/prompts'
+import { isDedicatedImageGenerationModel, isEmbeddingModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
@@ -20,7 +19,6 @@ import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
 import { findFileBlocks, getMainTextContent } from '@renderer/utils/messageUtils/find'
 import { containsSupportedVariables, replacePromptVariables } from '@renderer/utils/prompt'
-import { getTranslateOptions } from '@renderer/utils/translate'
 import { isEmpty, takeRight } from 'lodash'
 
 import AiProviderNew from '../aiCore/index_new'
@@ -172,74 +170,6 @@ export async function fetchChatCompletion({
   })
 }
 
-interface FetchLanguageDetectionProps {
-  text: string
-  onResponse?: (text: string, isComplete: boolean) => void
-}
-
-/**
- * Detect the language of a text
- * @param params - parameter object
- * @param {string} params.text - the text whose language should be detected
- * @param {function} [params.onResponse] - streaming callback used to receive the detection result as it arrives
- * @returns {Promise<string>} the detected language code; throws if detection fails
- * @throws {Error}
- */
-export async function fetchLanguageDetection({ text, onResponse }: FetchLanguageDetectionProps) {
-  const translateLanguageOptions = await getTranslateOptions()
-  const listLang = translateLanguageOptions.map((item) => item.langCode)
-  const listLangText = JSON.stringify(listLang)
-
-  const model = getQuickModel() || getDefaultModel()
-  if (!model) {
-    throw new Error(i18n.t('error.model.not_exists'))
-  }
-
-  if (isQwenMTModel(model)) {
-    logger.info('QwenMT cannot be used for language detection.')
-    if (isQwenMTModel(model)) {
-      throw new Error(i18n.t('translate.error.detect.qwen_mt'))
-    }
-  }
-
-  const provider = getProviderByModel(model)
-
-  if (!hasApiKey(provider)) {
-    throw new Error(i18n.t('error.no_api_key'))
-  }
-
-  const assistant: Assistant = getDefaultAssistant()
-
-  assistant.model = model
-  assistant.settings = {
-    temperature: 0.7
-  }
-  assistant.prompt = LANG_DETECT_PROMPT.replace('{{list_lang}}', listLangText).replace('{{input}}', text)
-
-  const isSupportedStreamOutput = () => {
-    if (!onResponse) {
-      return false
-    }
-    return true
-  }
-
-  const stream = isSupportedStreamOutput()
-
-  const params: CompletionsParams = {
-    callType: 'translate-lang-detect',
-    messages: 'follow system prompt',
-    assistant,
-    streamOutput: stream,
-    enableReasoning: false,
-    shouldThrow: true,
-    onResponse
-  }
-
-  const AI = new AiProvider(provider)
-
-  return (await AI.completions(params)).getText()
-}
-
 export async function fetchMessagesSummary({ messages, assistant }: { messages: Message[]; assistant: Assistant }) {
   let prompt = (getStoreSetting('topicNamingPrompt') as string) || i18n.t('prompts.title')
   const model = getQuickModel() || assistant.model || getDefaultModel()

(Second changed file: the language-detection module that implements detectLanguage / detectLanguageByLLM / detectLanguageByFranc; its path is not shown in this capture.)

@@ -1,10 +1,15 @@
 import { loggerService } from '@logger'
+import { isQwenMTModel } from '@renderer/config/models'
+import { LANG_DETECT_PROMPT } from '@renderer/config/prompts'
 import { builtinLanguages as builtinLanguages, LanguagesEnum, UNKNOWN } from '@renderer/config/translate'
 import db from '@renderer/databases'
-import { fetchLanguageDetection } from '@renderer/services/ApiService'
+import i18n from '@renderer/i18n'
+import { fetchChatCompletion } from '@renderer/services/ApiService'
+import { getDefaultAssistant, getDefaultModel, getQuickModel } from '@renderer/services/AssistantService'
 import { estimateTextTokens } from '@renderer/services/TokenService'
 import { getAllCustomLanguages } from '@renderer/services/TranslateService'
-import { TranslateLanguage, TranslateLanguageCode } from '@renderer/types'
+import { Assistant, TranslateLanguage, TranslateLanguageCode } from '@renderer/types'
+import { Chunk, ChunkType } from '@renderer/types/chunk'
 import { franc } from 'franc-min'
 import React, { RefObject } from 'react'
 import { sliceByTokens } from 'tokenx'
@@ -55,13 +60,41 @@ export const detectLanguage = async (inputText: string): Promise<TranslateLangua
 const detectLanguageByLLM = async (inputText: string): Promise<TranslateLanguageCode> => {
   logger.info('Detect language by llm')
   let detectedLang = ''
-  await fetchLanguageDetection({
-    text: sliceByTokens(inputText, 0, 100),
-    onResponse: (text) => {
-      detectedLang = text.replace(/^\s*\n+/g, '')
+  const text = sliceByTokens(inputText, 0, 100)
+
+  const translateLanguageOptions = await getTranslateOptions()
+  const listLang = translateLanguageOptions.map((item) => item.langCode)
+  const listLangText = JSON.stringify(listLang)
+
+  const model = getQuickModel() || getDefaultModel()
+  if (!model) {
+    throw new Error(i18n.t('error.model.not_exists'))
+  }
+
+  if (isQwenMTModel(model)) {
+    logger.info('QwenMT cannot be used for language detection.')
+    if (isQwenMTModel(model)) {
+      throw new Error(i18n.t('translate.error.detect.qwen_mt'))
     }
-  })
-  return detectedLang
+  }
+
+  const assistant: Assistant = getDefaultAssistant()
+
+  assistant.model = model
+  assistant.settings = {
+    temperature: 0.7
+  }
+  assistant.prompt = LANG_DETECT_PROMPT.replace('{{list_lang}}', listLangText).replace('{{input}}', text)
+
+  const onChunk: (chunk: Chunk) => void = (chunk: Chunk) => {
+    // You mean that although this is written as a delta type, it is actually the fully concatenated result?
+    if (chunk.type === ChunkType.TEXT_DELTA) {
+      detectedLang = chunk.text
+    }
+  }
+
+  await fetchChatCompletion({ prompt: 'follow system prompt', assistant, onChunkReceived: onChunk })
+  return detectedLang.trim()
 }
 
 const detectLanguageByFranc = (inputText: string): TranslateLanguageCode => {
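
One detail worth flagging from the hunk above: the question in the onChunk comment asks whether chunk.text on a ChunkType.TEXT_DELTA chunk is an increment or the already-concatenated text. The committed code overwrites detectedLang on every chunk, which only yields the complete answer if each chunk carries the accumulated text. A minimal hypothetical variant, assuming the chunks were true increments, would accumulate instead:

import { Chunk, ChunkType } from '@renderer/types/chunk'

// Hypothetical alternative (not part of this commit): concatenate deltas in
// case TEXT_DELTA chunks carry only the newly generated text.
let detectedLang = ''
const onChunk = (chunk: Chunk) => {
  if (chunk.type === ChunkType.TEXT_DELTA) {
    detectedLang += chunk.text
  }
}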