Merge remote-tracking branch 'origin/1600822305-patch-2' into TTSom

1600822305 2025-04-14 23:37:05 +08:00
commit 227dc01c85
89 changed files with 16873 additions and 69 deletions

asr-server/embedded.js Normal file

@@ -0,0 +1,123 @@
/**
 * Embedded ASR server module.
 * This file can run directly inside Electron, with no external dependencies.
 */
// Use the Node.js modules built into Electron
const http = require('http')
const path = require('path')
const fs = require('fs')
// Print environment info
console.log('ASR Server (Embedded) starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the HTTP server
const server = http.createServer((req, res) => {
try {
if (req.url === '/' || req.url === '/index.html') {
// Try several possible paths
const possiblePaths = [
// Current directory
path.join(__dirname, 'index.html'),
// Parent directory
path.join(__dirname, '..', 'index.html'),
// App root directory
path.join(process.cwd(), 'index.html')
]
console.log('Possible index.html paths:', possiblePaths)
// Use the first file that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
// Read the file contents and send them
fs.readFile(indexPath, (err, data) => {
if (err) {
console.error('Error reading index.html:', err)
res.writeHead(500)
res.end('Error reading index.html')
return
}
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(data)
})
} else {
// If the file cannot be found, return a simple HTML page
console.error('Could not find index.html, serving fallback page')
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:34515</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} else {
// Handle other requests
res.writeHead(404)
res.end('Not found')
}
} catch (error) {
console.error('Error handling request:', error)
res.writeHead(500)
res.end('Server error')
}
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
console.error('[Server] Uncaught exception:', error)
// Don't exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
const port = 34515
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}
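For orientation, a minimal sketch of how the Electron main process might launch this embedded server (an assumption for illustration; the actual integration lives elsewhere in this commit's 89 files, and the resolved path depends on the packaging layout):

// Hypothetical launcher, illustrative only
const { fork } = require('child_process')
const path = require('path')

function startEmbeddedAsrServer() {
  // Assumed packaged location; see the extraResources entry added later in this diff
  const entry = path.join(process.resourcesPath, 'app', 'asr-server', 'embedded.js')
  // stdio is inherited by default, so the logs above appear in the main process output
  const child = fork(entry)
  child.on('exit', (code) => console.log('[Main] ASR server exited with code', code))
  return child
}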

asr-server/index.html Normal file

@@ -0,0 +1,425 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Cherry Studio ASR</title>
<style>
body {
font-family: sans-serif;
padding: 1em;
}
#status {
margin-top: 1em;
font-style: italic;
color: #555;
}
#result {
margin-top: 0.5em;
border: 1px solid #ccc;
padding: 0.5em;
min-height: 50px;
background: #f9f9f9;
}
</style>
</head>
<body>
<h1>浏览器语音识别中继页面</h1>
<p>这个页面需要在浏览器中保持打开,以便应用使用其语音识别功能。</p>
<div id="status">正在连接到服务器...</div>
<div id="result"></div>
<script>
const statusDiv = document.getElementById('status');
const resultDiv = document.getElementById('result');
// Try to connect to the WebSocket server
let ws;
let reconnectAttempts = 0;
const maxReconnectAttempts = 5;
const reconnectInterval = 2000; // 2 seconds
function connectWebSocket() {
try {
ws = new WebSocket('ws://localhost:34515');
ws.onopen = () => {
reconnectAttempts = 0;
updateStatus('已连接到服务器,等待指令...');
ws.send(JSON.stringify({ type: 'identify', role: 'browser' }));
};
ws.onmessage = handleMessage;
ws.onerror = (error) => {
console.error('[Browser Page] WebSocket Error:', error);
updateStatus('WebSocket 连接错误!请检查服务器是否运行。');
};
ws.onclose = () => {
console.log('[Browser Page] WebSocket Connection Closed');
updateStatus('与服务器断开连接。尝试重新连接...');
stopRecognition();
// Try to reconnect
if (reconnectAttempts < maxReconnectAttempts) {
reconnectAttempts++;
updateStatus(`与服务器断开连接。尝试重新连接 (${reconnectAttempts}/${maxReconnectAttempts})...`);
setTimeout(connectWebSocket, reconnectInterval);
} else {
updateStatus('无法连接到服务器。请刷新页面或重启应用。');
}
};
} catch (error) {
console.error('[Browser Page] Error creating WebSocket:', error);
updateStatus('创建WebSocket连接时出错。请刷新页面或重启应用。');
}
}
// Initial connection
connectWebSocket();
let recognition = null;
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
function updateStatus(message) {
console.log(`[Browser Page Status] ${message}`);
statusDiv.textContent = message;
}
function handleMessage(event) {
let data;
try {
data = JSON.parse(event.data);
console.log('[Browser Page] Received command:', data);
} catch (e) {
console.error('[Browser Page] Received non-JSON message:', event.data);
return;
}
if (data.type === 'start') {
startRecognition();
} else if (data.type === 'stop') {
stopRecognition();
} else if (data.type === 'reset') {
// Force-reset speech recognition
forceResetRecognition();
} else {
console.warn('[Browser Page] Received unknown command type:', data.type);
}
}
function setupRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:此浏览器不支持 Web Speech API。');
return false;
}
if (recognition && recognition.recognizing) {
console.log('[Browser Page] Recognition already active.');
return true;
}
recognition = new SpeechRecognition();
recognition.lang = 'zh-CN';
recognition.continuous = true;
recognition.interimResults = true;
// The following settings improve recognition reliability
recognition.maxAlternatives = 3; // Return multiple candidate results
// Use a shorter recognition window so the user sees results sooner
// Note: this property is non-standard and may not be supported by all browsers
try {
// @ts-ignore
recognition.audioStart = 0.1; // Try a lower starting volume threshold
} catch (e) {
console.log('[Browser Page] audioStart property not supported');
}
recognition.onstart = () => {
updateStatus("🎤 正在识别...");
console.log('[Browser Page] SpeechRecognition started.');
};
recognition.onresult = (event) => {
console.log('[Browser Page] Recognition result event:', event);
let interim_transcript = '';
let final_transcript = '';
// Log result details to aid debugging
for (let i = event.resultIndex; i < event.results.length; ++i) {
const confidence = event.results[i][0].confidence;
console.log(`[Browser Page] Result ${i}: ${event.results[i][0].transcript} (Confidence: ${confidence.toFixed(2)})`);
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
} else {
interim_transcript += event.results[i][0].transcript;
}
}
const resultText = final_transcript || interim_transcript;
resultDiv.textContent = resultText;
// Update the status display
if (resultText) {
updateStatus(`🎤 正在识别... (已捕捉到语音)`);
}
if (ws.readyState === WebSocket.OPEN) {
console.log(`[Browser Page] Sending ${final_transcript ? 'final' : 'interim'} result to server:`, resultText);
ws.send(JSON.stringify({ type: 'result', data: { text: resultText, isFinal: !!final_transcript } }));
}
};
recognition.onerror = (event) => {
console.error(`[Browser Page] SpeechRecognition Error - Type: ${event.error}, Message: ${event.message}`);
// Provide a friendlier message based on the error type
let errorMessage = '';
switch (event.error) {
case 'no-speech':
errorMessage = '未检测到语音,请确保麦克风工作正常并尝试说话。';
// Try to restart speech recognition
setTimeout(() => {
if (recognition) {
try {
recognition.start();
console.log('[Browser Page] Restarting recognition after no-speech error');
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
}
}
}, 1000);
break;
case 'audio-capture':
errorMessage = '无法捕获音频,请确保麦克风已连接并已授权。';
break;
case 'not-allowed':
errorMessage = '浏览器不允许使用麦克风,请检查权限设置。';
break;
case 'network':
errorMessage = '网络错误导致语音识别失败。';
break;
case 'aborted':
errorMessage = '语音识别被用户或系统中止。';
break;
default:
errorMessage = `识别错误: ${event.error}`;
}
updateStatus(`错误: ${errorMessage}`);
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({
type: 'error',
data: {
error: event.error,
message: errorMessage || event.message || `Recognition error: ${event.error}`
}
}));
}
};
recognition.onend = () => {
console.log('[Browser Page] SpeechRecognition ended.');
// Check whether recognition ended due to an error or a manual stop
const isErrorOrStopped = statusDiv.textContent.includes('错误') || statusDiv.textContent.includes('停止');
if (!isErrorOrStopped) {
// If it wasn't an error or a manual stop, restart recognition automatically
updateStatus("识别暂停,正在重新启动...");
// Keep a reference to the current recognition object
const currentRecognition = recognition;
// Try to restart speech recognition
setTimeout(() => {
try {
if (currentRecognition && currentRecognition === recognition) {
currentRecognition.start();
console.log('[Browser Page] Automatically restarting recognition');
} else {
// If the recognition object has changed, create a new one
setupRecognition();
if (recognition) {
recognition.start();
console.log('[Browser Page] Created new recognition instance and started');
}
}
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
updateStatus("识别已停止。等待指令...");
}
}, 300);
} else {
updateStatus("识别已停止。等待指令...");
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'stopped' }));
}
// Only reset the recognition object on manual stop or error
recognition = null;
}
};
return true;
}
function startRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:浏览器不支持 Web Speech API。');
return;
}
// Show the preparing state
updateStatus('正在准备麦克风...');
if (recognition) {
console.log('[Browser Page] Recognition already exists, stopping first.');
stopRecognition();
}
if (!setupRecognition()) return;
console.log('[Browser Page] Attempting to start recognition...');
try {
// Use a generous timeout so there is enough time to grant microphone permission
const micPermissionTimeout = setTimeout(() => {
updateStatus('获取麦克风权限超时,请刷新页面重试。');
}, 10000); // 10-second timeout
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true
}
})
.then(stream => {
clearTimeout(micPermissionTimeout);
console.log('[Browser Page] Microphone access granted.');
// Check the microphone volume level
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
const microphone = audioContext.createMediaStreamSource(stream);
const javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
javascriptNode.onaudioprocess = function () {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += (array[i]);
}
const average = values / length;
console.log('[Browser Page] Microphone volume level:', average);
// If the volume is too low, show a hint
if (average < 5) {
updateStatus('麦克风音量很低,请说话或检查麦克风设置。');
} else {
updateStatus('🎤 正在识别...');
}
// Disconnect after a single check
microphone.disconnect();
analyser.disconnect();
javascriptNode.disconnect();
};
// Release the test audio stream
setTimeout(() => {
stream.getTracks().forEach(track => track.stop());
audioContext.close();
}, 1000);
// Start speech recognition
if (recognition) {
recognition.start();
updateStatus('🎤 正在识别...');
} else {
updateStatus('错误:Recognition 实例丢失。');
console.error('[Browser Page] Recognition instance lost before start.');
}
})
.catch(err => {
clearTimeout(micPermissionTimeout);
console.error('[Browser Page] Microphone access error:', err);
let errorMsg = `无法访问麦克风 (${err.name})`;
if (err.name === 'NotAllowedError') {
errorMsg = '麦克风访问被拒绝。请在浏览器设置中允许麦克风访问权限。';
} else if (err.name === 'NotFoundError') {
errorMsg = '未找到麦克风设备。请确保麦克风已连接。';
}
updateStatus(`错误: ${errorMsg}`);
recognition = null;
});
} catch (e) {
console.error('[Browser Page] Error calling recognition.start():', e);
updateStatus(`启动识别时出错: ${e.message}`);
recognition = null;
}
}
function stopRecognition() {
if (recognition) {
console.log('[Browser Page] Stopping recognition...');
updateStatus("正在停止识别...");
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error calling recognition.stop():', e);
recognition = null;
updateStatus("停止时出错,已强制重置。");
}
} else {
console.log('[Browser Page] Recognition not active, nothing to stop.');
updateStatus("识别未运行。");
}
}
function forceResetRecognition() {
console.log('[Browser Page] Force resetting recognition...');
updateStatus("强制重置语音识别...");
// First try to stop the current recognition
if (recognition) {
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error stopping recognition during reset:', e);
}
}
// Force it to null, discarding any subsequent results
recognition = null;
// Notify the server that the reset is complete
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'reset_complete' }));
}
updateStatus("语音识别已重置,等待新指令。");
}
</script>
</body>
</html>
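For reference, the message shapes this relay page exchanges with the WebSocket server, collected from the script above (field values are illustrative):

// Page → server
{ "type": "identify", "role": "browser" }                          // sent on connect
{ "type": "result", "data": { "text": "...", "isFinal": true } }   // recognition output
{ "type": "error", "data": { "error": "no-speech", "message": "..." } }
{ "type": "status", "message": "stopped" }                         // after a manual stop, or "reset_complete"
// Server → page
{ "type": "start" }, { "type": "stop" }, { "type": "reset" }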

asr-server/package-lock.json generated Normal file

@@ -0,0 +1,854 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cherry-asr-server",
"version": "1.0.0",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"license": "MIT",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"license": "MIT"
},
"node_modules/body-parser": {
"version": "1.20.3",
"resolved": "https://registry.npmmirror.com/body-parser/-/body-parser-1.20.3.tgz",
"integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.13.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/call-bind-apply-helpers": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/call-bound": {
"version": "1.0.4",
"resolved": "https://registry.npmmirror.com/call-bound/-/call-bound-1.0.4.tgz",
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"get-intrinsic": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmmirror.com/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie": {
"version": "0.7.1",
"resolved": "https://registry.npmmirror.com/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmmirror.com/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
"license": "MIT"
},
"node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"license": "MIT",
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"es-errors": "^1.3.0",
"gopd": "^1.2.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT"
},
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-2.0.0.tgz",
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-object-atoms": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
"license": "MIT"
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/express": {
"version": "4.21.2",
"resolved": "https://registry.npmmirror.com/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.3",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.7.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.3.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.12",
"proxy-addr": "~2.0.7",
"qs": "6.13.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.19.0",
"serve-static": "1.16.2",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/finalhandler": {
"version": "1.3.1",
"resolved": "https://registry.npmmirror.com/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
"es-object-atoms": "^1.1.1",
"function-bind": "^1.1.2",
"get-proto": "^1.0.1",
"gopd": "^1.2.0",
"has-symbols": "^1.1.0",
"hasown": "^2.0.2",
"math-intrinsics": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz",
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
"license": "MIT",
"dependencies": {
"dunder-proto": "^1.0.1",
"es-object-atoms": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"license": "MIT",
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"license": "ISC"
},
"node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"license": "MIT",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmmirror.com/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"license": "MIT",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/object-inspect": {
"version": "1.13.4",
"resolved": "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.13.4.tgz",
"integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmmirror.com/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"license": "MIT",
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmmirror.com/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmmirror.com/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"license": "MIT",
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmmirror.com/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.0.6"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.2",
"resolved": "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
"node_modules/send": {
"version": "0.19.0",
"resolved": "https://registry.npmmirror.com/send/-/send-0.19.0.tgz",
"integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/serve-static": {
"version": "1.16.2",
"resolved": "https://registry.npmmirror.com/serve-static/-/serve-static-1.16.2.tgz",
"integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
"license": "MIT",
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.19.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"license": "ISC"
},
"node_modules/side-channel": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/side-channel/-/side-channel-1.1.0.tgz",
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3",
"side-channel-list": "^1.0.0",
"side-channel-map": "^1.0.1",
"side-channel-weakmap": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-list": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/side-channel-list/-/side-channel-list-1.0.0.tgz",
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-map": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/side-channel-map/-/side-channel-map-1.0.1.tgz",
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-weakmap": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3",
"side-channel-map": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"license": "MIT",
"engines": {
"node": ">=0.6"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmmirror.com/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"license": "MIT",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/ws": {
"version": "8.18.1",
"resolved": "https://registry.npmmirror.com/ws/-/ws-8.18.1.tgz",
"integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
}
}
}

asr-server/package.json Normal file

@@ -0,0 +1,10 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"description": "Cherry Studio ASR Server",
"main": "server.js",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
}
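A usage note (an assumption; this commit includes no start script): after installing the dependencies with npm install, the relay server can be launched directly with node server.js, the declared main entry. It listens on port 34515, as the server code below shows.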

asr-server/server.js Normal file

@@ -0,0 +1,269 @@
// Check dependencies
try {
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Check required dependencies
const checkDependency = (name) => {
try {
require(name) // Removed unused variable 'module'
console.log(`Successfully loaded dependency: ${name}`)
return true
} catch (error) {
console.error(`Failed to load dependency: ${name}`, error.message)
return false
}
}
// Check that all required dependencies load
const dependencies = ['http', 'ws', 'express', 'path', 'fs']
const missingDeps = dependencies.filter((dep) => !checkDependency(dep))
if (missingDeps.length > 0) {
console.error(`Missing dependencies: ${missingDeps.join(', ')}. Server cannot start.`)
process.exit(1)
}
} catch (error) {
console.error('Error during dependency check:', error)
process.exit(1)
}
// Load dependencies
const http = require('http')
const WebSocket = require('ws')
const express = require('express')
const path = require('path') // Need path module
// const fs = require('fs') // Commented out unused import 'fs'
const app = express()
const port = 34515 // Define the port
// Resolve the path to index.html
function getIndexHtmlPath() {
const fs = require('fs')
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
// Try several possible paths
const possiblePaths = [
// Development path
path.join(__dirname, 'index.html'),
// Current directory
path.join(process.cwd(), 'index.html'),
// Path relative to the executable
path.join(path.dirname(process.execPath), 'index.html'),
// Path relative to the executable's parent directory
path.join(path.dirname(path.dirname(process.execPath)), 'index.html'),
// Path relative to the executable's resources directory
path.join(path.dirname(process.execPath), 'resources', 'index.html'),
// Path relative to the executable's resources/asr-server directory
path.join(path.dirname(process.execPath), 'resources', 'asr-server', 'index.html'),
// Path relative to the executable's asr-server directory
path.join(path.dirname(process.execPath), 'asr-server', 'index.html'),
// If running in a pkg-packaged environment
process.pkg ? path.join(path.dirname(process.execPath), 'index.html') : null
].filter(Boolean) // Filter out null values
console.log('Possible index.html paths:', possiblePaths)
// Check each path and return the first file that exists
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
console.log(`Found index.html at: ${p}`)
return p
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
// If no file is found, log an error and fall back to the default path
console.error('Could not find index.html in any of the expected locations')
return path.join(__dirname, 'index.html') // Return the default path even though it may not exist
}
// Serve the page to the browser
app.get('/', (req, res) => {
try {
const indexPath = getIndexHtmlPath()
console.log(`Serving index.html from: ${indexPath}`)
// Check whether the file exists
const fs = require('fs')
if (!fs.existsSync(indexPath)) {
console.error(`Error: index.html not found at ${indexPath}`)
return res.status(404).send(`Error: index.html not found at ${indexPath}. <br>Please check the server logs.`)
}
res.sendFile(indexPath, (err) => {
if (err) {
console.error('Error sending index.html:', err)
res.status(500).send(`Error serving index.html: ${err.message}`)
}
})
} catch (error) {
console.error('Error in route handler:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
const server = http.createServer(app)
const wss = new WebSocket.Server({ server })
let browserConnection = null
let electronConnection = null
wss.on('connection', (ws) => {
console.log('[Server] WebSocket client connected') // Add log
ws.on('message', (message) => {
let data
try {
// Ensure message is treated as string before parsing
data = JSON.parse(message.toString())
console.log('[Server] Received message:', data) // Log parsed data
} catch (e) {
console.error('[Server] Failed to parse message or message is not JSON:', message.toString(), e)
return // Ignore non-JSON messages
}
// Identify the client type
if (data.type === 'identify') {
if (data.role === 'browser') {
browserConnection = ws
console.log('[Server] Browser identified and connected')
// Notify Electron that the browser is ready
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent browser_ready status to Electron')
}
// Notify Electron if it's already connected
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connected' }))
}
ws.on('close', () => {
console.log('[Server] Browser disconnected')
browserConnection = null
// Notify Electron
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser disconnected' }))
}
})
ws.on('error', (error) => {
console.error('[Server] Browser WebSocket error:', error)
browserConnection = null // Assume disconnected on error
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
}
})
} else if (data.role === 'electron') {
electronConnection = ws
console.log('[Server] Electron identified and connected')
// If browser is already connected when Electron connects, notify Electron immediately
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent initial browser_ready status to Electron')
}
ws.on('close', () => {
console.log('[Server] Electron disconnected')
electronConnection = null
// Maybe send stop to browser if electron disconnects?
// if (browserConnection) browserConnection.send(JSON.stringify({ type: 'stop' }));
})
ws.on('error', (error) => {
console.error('[Server] Electron WebSocket error:', error)
electronConnection = null // Assume disconnected on error
})
}
}
// Electron controls start/stop
else if (data.type === 'start' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying START command to browser')
browserConnection.send(JSON.stringify({ type: 'start' }))
} else {
console.log('[Server] Cannot relay START: Browser not connected')
// Optionally notify Electron back
electronConnection.send(JSON.stringify({ type: 'error', message: 'Browser not connected for ASR' }))
}
} else if (data.type === 'stop' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STOP command to browser')
browserConnection.send(JSON.stringify({ type: 'stop' }))
} else {
console.log('[Server] Cannot relay STOP: Browser not connected')
}
} else if (data.type === 'reset' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying RESET command to browser')
browserConnection.send(JSON.stringify({ type: 'reset' }))
} else {
console.log('[Server] Cannot relay RESET: Browser not connected')
}
}
// Browser sends recognition results
else if (data.type === 'result' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
// console.log('[Server] Relaying RESULT to Electron:', data.data); // Log less frequently if needed
electronConnection.send(JSON.stringify({ type: 'result', data: data.data }))
} else {
// console.log('[Server] Cannot relay RESULT: Electron not connected');
}
}
// Browser sends status updates (e.g. 'stopped')
else if (data.type === 'status' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STATUS to Electron:', data.message) // Log status being relayed
electronConnection.send(JSON.stringify({ type: 'status', message: data.message }))
} else {
console.log('[Server] Cannot relay STATUS: Electron not connected')
}
} else {
console.log('[Server] Received unknown message type or from unknown source:', data)
}
})
ws.on('error', (error) => {
// Generic error handling for connection before identification
console.error('[Server] Initial WebSocket connection error:', error)
// Attempt to clean up based on which connection it might be (if identified)
if (ws === browserConnection) {
browserConnection = null
if (electronConnection)
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
} else if (ws === electronConnection) {
electronConnection = null
}
})
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
console.error('[Server] Uncaught exception:', error)
// Don't exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}
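To make the relay protocol concrete, here is a minimal sketch of the Electron-side client that server.js expects (hypothetical; shown only to illustrate the handlers above, using the same ws package):

const WebSocket = require('ws')

const ws = new WebSocket('ws://localhost:34515')
ws.on('open', () => {
  // Identify this connection so the server stores it as electronConnection
  ws.send(JSON.stringify({ type: 'identify', role: 'electron' }))
})
ws.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'status' && msg.message === 'browser_ready') {
    // The relay page is connected; ask it to start recognizing
    ws.send(JSON.stringify({ type: 'start' }))
  } else if (msg.type === 'result') {
    console.log('ASR result:', msg.data.text, 'isFinal:', msg.data.isFinal)
  } else if (msg.type === 'error') {
    console.error('ASR error:', msg.message)
  }
})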

asr-server/standalone.js Normal file

@@ -0,0 +1,114 @@
/**
 * Standalone ASR server.
 * A simplified version of server.js for running in the packaged app.
 */
// Basic dependencies
const http = require('http')
const express = require('express')
const path = require('path')
const fs = require('fs')
// Print environment info
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the Express app
const app = express()
const port = 34515
// Serve static files
app.use(express.static(__dirname))
// Serve the page to the browser
app.get('/', (req, res) => {
try {
// Try several possible paths
const possiblePaths = [
// Current directory
path.join(__dirname, 'index.html'),
// Parent directory
path.join(__dirname, '..', 'index.html'),
// App root directory
path.join(process.cwd(), 'index.html')
]
console.log('Possible index.html paths:', possiblePaths)
// Use the first file that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
res.sendFile(indexPath)
} else {
// If the file cannot be found, return a simple HTML page
console.error('Could not find index.html, serving fallback page')
res.send(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:${port}</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} catch (error) {
console.error('Error serving index.html:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
// Create the HTTP server
const server = http.createServer(app)
// Add process-level error handling
process.on('uncaughtException', (error) => {
console.error('[Server] Uncaught exception:', error)
// Don't exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}


@@ -0,0 +1,5 @@
@echo off
echo Starting ASR Server...
cd /d %~dp0
node standalone.js
pause


@@ -9,6 +9,8 @@ electronLanguages:
directories:
buildResources: build
files:
- out/**/*
- package.json
- '!{.vscode,.yarn,.github}'
- '!electron.vite.config.{js,ts,mjs,cjs}'
- '!{.eslintignore,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,dev-app-update.yml,CHANGELOG.md,README.md}'
@@ -33,9 +35,18 @@ files:
- '!node_modules/@tavily/core/node_modules/js-tiktoken'
- '!node_modules/pdf-parse/lib/pdf.js/{v1.9.426,v1.10.88,v2.0.550}'
- '!node_modules/mammoth/{mammoth.browser.js,mammoth.browser.min.js}'
asarUnpack:
asarUnpack: # Removed ASR server rules from 'files' section
- resources/**
- '**/*.{metal,exp,lib}'
extraResources: # Add extraResources to copy the prepared asr-server directory
- from: asr-server # Copy the folder from project root
to: app/asr-server # Copy TO the 'app' subfolder within resources
filter:
- '**/*' # Include everything inside
- from: resources/data # Copy the data folder with agents.json
to: data # Copy TO the 'data' subfolder within resources
filter:
- '**/*' # Include everything inside
win:
executableName: Cherry Studio
artifactName: ${productName}-${version}-${arch}-setup.${ext}
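A note on the extraResources entries above: electron-builder copies these folders into the application's resources directory, so at runtime the main process can resolve the bundled server roughly like this (a sketch under that assumption; the exact layout depends on the build):

const path = require('path')
const asrServerDir = path.join(process.resourcesPath, 'app', 'asr-server')
const serverEntry = path.join(asrServerDir, 'server.js')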


@@ -76,6 +76,17 @@ export default defineConfig({
},
optimizeDeps: {
exclude: []
},
build: {
rollupOptions: {
input: {
index: resolve('src/renderer/index.html')
}
},
// Copy the ASR server files
assetsInlineLimit: 0,
// Make sure everything under the assets directory is copied
copyPublicDir: true
}
}
})

index.html Normal file

@@ -0,0 +1,425 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Cherry Studio ASR</title>
<style>
body {
font-family: sans-serif;
padding: 1em;
}
#status {
margin-top: 1em;
font-style: italic;
color: #555;
}
#result {
margin-top: 0.5em;
border: 1px solid #ccc;
padding: 0.5em;
min-height: 50px;
background: #f9f9f9;
}
</style>
</head>
<body>
<h1>浏览器语音识别中继页面</h1>
<p>这个页面需要在浏览器中保持打开,以便应用使用其语音识别功能。</p>
<div id="status">正在连接到服务器...</div>
<div id="result"></div>
<script>
const statusDiv = document.getElementById('status');
const resultDiv = document.getElementById('result');
// Try to connect to the WebSocket server
let ws;
let reconnectAttempts = 0;
const maxReconnectAttempts = 5;
const reconnectInterval = 2000; // 2 seconds
function connectWebSocket() {
try {
ws = new WebSocket('ws://localhost:34515');
ws.onopen = () => {
reconnectAttempts = 0;
updateStatus('已连接到服务器,等待指令...');
ws.send(JSON.stringify({ type: 'identify', role: 'browser' }));
};
ws.onmessage = handleMessage;
ws.onerror = (error) => {
console.error('[Browser Page] WebSocket Error:', error);
updateStatus('WebSocket 连接错误!请检查服务器是否运行。');
};
ws.onclose = () => {
console.log('[Browser Page] WebSocket Connection Closed');
updateStatus('与服务器断开连接。尝试重新连接...');
stopRecognition();
// Try to reconnect
if (reconnectAttempts < maxReconnectAttempts) {
reconnectAttempts++;
updateStatus(`与服务器断开连接。尝试重新连接 (${reconnectAttempts}/${maxReconnectAttempts})...`);
setTimeout(connectWebSocket, reconnectInterval);
} else {
updateStatus('无法连接到服务器。请刷新页面或重启应用。');
}
};
} catch (error) {
console.error('[Browser Page] Error creating WebSocket:', error);
updateStatus('创建WebSocket连接时出错。请刷新页面或重启应用。');
}
}
// Initial connection
connectWebSocket();
let recognition = null;
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
function updateStatus(message) {
console.log(`[Browser Page Status] ${message}`);
statusDiv.textContent = message;
}
function handleMessage(event) {
let data;
try {
data = JSON.parse(event.data);
console.log('[Browser Page] Received command:', data);
} catch (e) {
console.error('[Browser Page] Received non-JSON message:', event.data);
return;
}
if (data.type === 'start') {
startRecognition();
} else if (data.type === 'stop') {
stopRecognition();
} else if (data.type === 'reset') {
// Force-reset speech recognition
forceResetRecognition();
} else {
console.warn('[Browser Page] Received unknown command type:', data.type);
}
}
function setupRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:此浏览器不支持 Web Speech API。');
return false;
}
if (recognition && recognition.recognizing) {
console.log('[Browser Page] Recognition already active.');
return true;
}
recognition = new SpeechRecognition();
recognition.lang = 'zh-CN';
recognition.continuous = true;
recognition.interimResults = true;
// The following settings improve recognition reliability
recognition.maxAlternatives = 3; // Return multiple candidate results
// Use a shorter recognition window so the user sees results sooner
// Note: this property is non-standard and may not be supported by all browsers
try {
// @ts-ignore
recognition.audioStart = 0.1; // Try a lower starting volume threshold
} catch (e) {
console.log('[Browser Page] audioStart property not supported');
}
recognition.onstart = () => {
updateStatus("🎤 正在识别...");
console.log('[Browser Page] SpeechRecognition started.');
};
recognition.onresult = (event) => {
console.log('[Browser Page] Recognition result event:', event);
let interim_transcript = '';
let final_transcript = '';
// Log result details to aid debugging
for (let i = event.resultIndex; i < event.results.length; ++i) {
const confidence = event.results[i][0].confidence;
console.log(`[Browser Page] Result ${i}: ${event.results[i][0].transcript} (Confidence: ${confidence.toFixed(2)})`);
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
} else {
interim_transcript += event.results[i][0].transcript;
}
}
const resultText = final_transcript || interim_transcript;
resultDiv.textContent = resultText;
// Update the status display
if (resultText) {
updateStatus(`🎤 正在识别... (已捕捉到语音)`);
}
if (ws.readyState === WebSocket.OPEN) {
console.log(`[Browser Page] Sending ${final_transcript ? 'final' : 'interim'} result to server:`, resultText);
ws.send(JSON.stringify({ type: 'result', data: { text: resultText, isFinal: !!final_transcript } }));
}
};
recognition.onerror = (event) => {
console.error(`[Browser Page] SpeechRecognition Error - Type: ${event.error}, Message: ${event.message}`);
// Provide a friendlier message based on the error type
let errorMessage = '';
switch (event.error) {
case 'no-speech':
errorMessage = '未检测到语音,请确保麦克风工作正常并尝试说话。';
// Try to restart speech recognition
setTimeout(() => {
if (recognition) {
try {
recognition.start();
console.log('[Browser Page] Restarting recognition after no-speech error');
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
}
}
}, 1000);
break;
case 'audio-capture':
errorMessage = '无法捕获音频,请确保麦克风已连接并已授权。';
break;
case 'not-allowed':
errorMessage = '浏览器不允许使用麦克风,请检查权限设置。';
break;
case 'network':
errorMessage = '网络错误导致语音识别失败。';
break;
case 'aborted':
errorMessage = '语音识别被用户或系统中止。';
break;
default:
errorMessage = `识别错误: ${event.error}`;
}
updateStatus(`错误: ${errorMessage}`);
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({
type: 'error',
data: {
error: event.error,
message: errorMessage || event.message || `Recognition error: ${event.error}`
}
}));
}
};
recognition.onend = () => {
console.log('[Browser Page] SpeechRecognition ended.');
// Check whether recognition ended due to an error or a manual stop
const isErrorOrStopped = statusDiv.textContent.includes('错误') || statusDiv.textContent.includes('停止');
if (!isErrorOrStopped) {
                    // If it was neither an error nor a manual stop, restart recognition automatically
                    updateStatus("识别暂停,正在重新启动...");
                    // Keep a reference to the current recognition object
                    const currentRecognition = recognition;
                    // Try to restart recognition
setTimeout(() => {
try {
if (currentRecognition && currentRecognition === recognition) {
currentRecognition.start();
console.log('[Browser Page] Automatically restarting recognition');
} else {
                                // If the recognition object has changed, create a new one
setupRecognition();
if (recognition) {
recognition.start();
console.log('[Browser Page] Created new recognition instance and started');
}
}
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
updateStatus("识别已停止。等待指令...");
}
}, 300);
} else {
updateStatus("识别已停止。等待指令...");
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'stopped' }));
}
                    // Only reset the recognition object on manual stop or error
recognition = null;
}
};
return true;
}
function startRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:浏览器不支持 Web Speech API。');
return;
}
            // Show that we are preparing the microphone
updateStatus('正在准备麦克风...');
if (recognition) {
console.log('[Browser Page] Recognition already exists, stopping first.');
stopRecognition();
}
if (!setupRecognition()) return;
console.log('[Browser Page] Attempting to start recognition...');
try {
                // Use a longer timeout so there is enough time to grant microphone permission
const micPermissionTimeout = setTimeout(() => {
updateStatus('获取麦克风权限超时,请刷新页面重试。');
                }, 10000); // 10-second timeout
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true
}
})
.then(stream => {
clearTimeout(micPermissionTimeout);
console.log('[Browser Page] Microphone access granted.');
            // Check the microphone input level once before recognition starts
            const audioContext = new AudioContext();
            const analyser = audioContext.createAnalyser();
            const microphone = audioContext.createMediaStreamSource(stream);
            const javascriptNode = audioContext.createScriptProcessor(2048, 1, 1); // deprecated API, but still widely supported
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
javascriptNode.onaudioprocess = function () {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += (array[i]);
}
const average = values / length;
console.log('[Browser Page] Microphone volume level:', average);
                // If the volume is too low, show a hint
if (average < 5) {
updateStatus('麦克风音量很低,请说话或检查麦克风设置。');
} else {
updateStatus('🎤 正在识别...');
}
                // Check only once, then disconnect the audio graph
microphone.disconnect();
analyser.disconnect();
javascriptNode.disconnect();
};
            // Release the test audio stream
setTimeout(() => {
stream.getTracks().forEach(track => track.stop());
audioContext.close();
}, 1000);
            // Start speech recognition
if (recognition) {
recognition.start();
updateStatus('🎤 正在识别...');
} else {
updateStatus('错误Recognition 实例丢失。');
console.error('[Browser Page] Recognition instance lost before start.');
}
})
.catch(err => {
clearTimeout(micPermissionTimeout);
console.error('[Browser Page] Microphone access error:', err);
let errorMsg = `无法访问麦克风 (${err.name})`;
if (err.name === 'NotAllowedError') {
errorMsg = '麦克风访问被拒绝。请在浏览器设置中允许麦克风访问权限。';
} else if (err.name === 'NotFoundError') {
errorMsg = '未找到麦克风设备。请确保麦克风已连接。';
}
updateStatus(`错误: ${errorMsg}`);
recognition = null;
});
} catch (e) {
console.error('[Browser Page] Error calling recognition.start():', e);
updateStatus(`启动识别时出错: ${e.message}`);
recognition = null;
}
}
function stopRecognition() {
if (recognition) {
console.log('[Browser Page] Stopping recognition...');
updateStatus("正在停止识别...");
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error calling recognition.stop():', e);
recognition = null;
updateStatus("停止时出错,已强制重置。");
}
} else {
console.log('[Browser Page] Recognition not active, nothing to stop.');
updateStatus("识别未运行。");
}
}
function forceResetRecognition() {
console.log('[Browser Page] Force resetting recognition...');
updateStatus("强制重置语音识别...");
            // First try to stop the current recognition
if (recognition) {
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error stopping recognition during reset:', e);
}
}
            // Force the reference to null so any late results are discarded
recognition = null;
            // Notify the server that the reset is complete
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'reset_complete' }));
}
updateStatus("语音识别已重置,等待新指令。");
}
</script>
</body>
</html>
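
The page above is only one half of the relay: a second client has to identify itself with role 'electron' and drive it. Below is a minimal controller sketch, assuming Node.js with the `ws` package installed and the relay server from this commit listening on port 34515; this file is illustrative and not part of the commit.

// controller.js - minimal 'electron'-role client for the ASR relay (sketch)
const WebSocket = require('ws')

const ws = new WebSocket('ws://localhost:34515')

ws.on('open', () => {
  // Identify ourselves so the server routes browser results to this connection
  ws.send(JSON.stringify({ type: 'identify', role: 'electron' }))
})

ws.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'status' && msg.message === 'browser_ready') {
    // The relay page is connected; begin recognition
    ws.send(JSON.stringify({ type: 'start' }))
  } else if (msg.type === 'result') {
    console.log(msg.data.isFinal ? 'final:' : 'interim:', msg.data.text)
    if (msg.data.isFinal) ws.send(JSON.stringify({ type: 'stop' }))
  }
})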


@ -78,6 +78,7 @@
"d3": "^7.9.0",
"diff": "^7.0.0",
"docx": "^9.0.2",
"edge-tts-node": "^1.5.7",
"electron-log": "^5.1.5",
"electron-store": "^8.2.0",
"electron-updater": "^6.3.9",
@ -90,6 +91,7 @@
"js-yaml": "^4.1.0",
"jsdom": "^26.0.0",
"markdown-it": "^14.1.0",
"node-edge-tts": "^1.2.8",
"officeparser": "^4.1.1",
"proxy-agent": "^6.5.0",
"tar": "^7.4.3",
@ -126,7 +128,7 @@
"@types/diff": "^7",
"@types/fs-extra": "^11",
"@types/js-yaml": "^4",
"@types/lodash": "^4.17.5",
"@types/lodash": "^4.17.16",
"@types/markdown-it": "^14",
"@types/md5": "^2.3.5",
"@types/node": "^18.19.9",


@ -18,6 +18,17 @@ export enum IpcChannel {
App_InstallUvBinary = 'app:install-uv-binary',
App_InstallBunBinary = 'app:install-bun-binary',
// ASR Server
Asr_StartServer = 'start-asr-server',
Asr_StopServer = 'stop-asr-server',
// MsTTS
MsTTS_GetVoices = 'mstts:get-voices',
MsTTS_Synthesize = 'mstts:synthesize',
MsTTS_SynthesizeStream = 'mstts:synthesize-stream',
MsTTS_StreamData = 'mstts:stream-data',
MsTTS_StreamEnd = 'mstts:stream-end',
// Open
Open_Path = 'open:path',
Open_Website = 'open:website',
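
A sketch of how the new channels might be wired, using Electron's standard ipcMain.handle/ipcRenderer.invoke pattern; the handler bodies (startAsrServer, stopAsrServer, listVoices) are hypothetical helpers, not code from this commit.

// main process (sketch)
const { ipcMain } = require('electron')
ipcMain.handle('start-asr-server', async () => startAsrServer()) // hypothetical helper
ipcMain.handle('stop-asr-server', async () => stopAsrServer()) // hypothetical helper
ipcMain.handle('mstts:get-voices', async () => listVoices()) // hypothetical helper

// renderer process (sketch)
const { ipcRenderer } = require('electron')
async function startAsr() {
  await ipcRenderer.invoke('start-asr-server')
  const voices = await ipcRenderer.invoke('mstts:get-voices')
  console.log('Available voices:', voices)
}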


@ -0,0 +1,123 @@
/**
 * Embedded ASR server module.
 * This file can run directly inside Electron and needs no external dependencies.
 */
// Use only the Node.js modules built into Electron
const http = require('http')
const path = require('path')
const fs = require('fs')
// Log environment information
console.log('ASR Server (Embedded) starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the HTTP server
const server = http.createServer((req, res) => {
try {
if (req.url === '/' || req.url === '/index.html') {
      // Try several possible locations
      const possiblePaths = [
        // current directory
        path.join(__dirname, 'index.html'),
        // parent directory
        path.join(__dirname, '..', 'index.html'),
        // application root directory
        path.join(process.cwd(), 'index.html')
]
console.log('Possible index.html paths:', possiblePaths)
      // Use the first file that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
        // Read the file contents and send them
fs.readFile(indexPath, (err, data) => {
if (err) {
console.error('Error reading index.html:', err)
res.writeHead(500)
res.end('Error reading index.html')
return
}
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(data)
})
} else {
        // If the file cannot be found, serve a simple fallback HTML page
console.error('Could not find index.html, serving fallback page')
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:34515</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} else {
      // Handle all other requests
res.writeHead(404)
res.end('Not found')
}
} catch (error) {
console.error('Error handling request:', error)
res.writeHead(500)
res.end('Server error')
}
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; give the log output time to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
const port = 34515
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
  // Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}
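
One plausible way for the Electron main process to launch this embedded server is a forked child process plus a simple HTTP health check. A sketch only; the file location is an assumption and the poll interval is arbitrary.

// sketch: fork the embedded server and poll until it answers
const { fork } = require('child_process')
const http = require('http')
const path = require('path')

const child = fork(path.join(__dirname, 'embedded.js')) // path is an assumption

function waitForServer(retries = 10) {
  http
    .get('http://localhost:34515/', (res) => {
      res.resume() // drain the response so the socket is released
      console.log('ASR server is up, status', res.statusCode)
    })
    .on('error', () => {
      if (retries > 0) setTimeout(() => waitForServer(retries - 1), 500)
      else child.kill()
    })
}
waitForServer()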


@ -0,0 +1,425 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Cherry Studio ASR</title>
<style>
body {
font-family: sans-serif;
padding: 1em;
}
#status {
margin-top: 1em;
font-style: italic;
color: #555;
}
#result {
margin-top: 0.5em;
border: 1px solid #ccc;
padding: 0.5em;
min-height: 50px;
background: #f9f9f9;
}
</style>
</head>
<body>
<h1>浏览器语音识别中继页面</h1>
<p>这个页面需要在浏览器中保持打开,以便应用使用其语音识别功能。</p>
<div id="status">正在连接到服务器...</div>
<div id="result"></div>
<script>
const statusDiv = document.getElementById('status');
const resultDiv = document.getElementById('result');
        // Try to connect to the WebSocket server
let ws;
let reconnectAttempts = 0;
const maxReconnectAttempts = 5;
        const reconnectInterval = 2000; // 2 seconds
function connectWebSocket() {
try {
ws = new WebSocket('ws://localhost:34515');
ws.onopen = () => {
reconnectAttempts = 0;
updateStatus('已连接到服务器,等待指令...');
ws.send(JSON.stringify({ type: 'identify', role: 'browser' }));
};
ws.onmessage = handleMessage;
ws.onerror = (error) => {
console.error('[Browser Page] WebSocket Error:', error);
updateStatus('WebSocket 连接错误!请检查服务器是否运行。');
};
ws.onclose = () => {
console.log('[Browser Page] WebSocket Connection Closed');
updateStatus('与服务器断开连接。尝试重新连接...');
stopRecognition();
                    // Try to reconnect
if (reconnectAttempts < maxReconnectAttempts) {
reconnectAttempts++;
updateStatus(`与服务器断开连接。尝试重新连接 (${reconnectAttempts}/${maxReconnectAttempts})...`);
setTimeout(connectWebSocket, reconnectInterval);
} else {
updateStatus('无法连接到服务器。请刷新页面或重启应用。');
}
};
} catch (error) {
console.error('[Browser Page] Error creating WebSocket:', error);
updateStatus('创建WebSocket连接时出错。请刷新页面或重启应用。');
}
}
        // Initial connection
connectWebSocket();
let recognition = null;
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
function updateStatus(message) {
console.log(`[Browser Page Status] ${message}`);
statusDiv.textContent = message;
}
function handleMessage(event) {
let data;
try {
data = JSON.parse(event.data);
console.log('[Browser Page] Received command:', data);
} catch (e) {
console.error('[Browser Page] Received non-JSON message:', event.data);
return;
}
if (data.type === 'start') {
startRecognition();
} else if (data.type === 'stop') {
stopRecognition();
} else if (data.type === 'reset') {
                // Force-reset the recognition engine
forceResetRecognition();
} else {
console.warn('[Browser Page] Received unknown command type:', data.type);
}
        }
function setupRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:此浏览器不支持 Web Speech API。');
return false;
}
            // Note: `recognizing` is not a standard SpeechRecognition property, so this guard is best-effort only
            if (recognition && recognition.recognizing) {
console.log('[Browser Page] Recognition already active.');
return true;
}
recognition = new SpeechRecognition();
recognition.lang = 'zh-CN';
recognition.continuous = true;
recognition.interimResults = true;
            // The settings below are meant to improve recognition reliability
            recognition.maxAlternatives = 3; // return several candidate transcripts
            // Try to shorten the recognition window so the user sees results sooner.
            // Note: this property is non-standard and may not be supported by all browsers.
try {
// @ts-ignore
                recognition.audioStart = 0.1; // try a lower start-of-speech volume threshold (non-standard)
} catch (e) {
console.log('[Browser Page] audioStart property not supported');
}
recognition.onstart = () => {
updateStatus("🎤 正在识别...");
console.log('[Browser Page] SpeechRecognition started.');
};
recognition.onresult = (event) => {
console.log('[Browser Page] Recognition result event:', event);
let interim_transcript = '';
let final_transcript = '';
                // Log detailed result information for debugging
for (let i = event.resultIndex; i < event.results.length; ++i) {
                    const confidence = event.results[i][0].confidence;
                    // `confidence` may be undefined in some browsers, so guard before formatting
                    console.log(`[Browser Page] Result ${i}: ${event.results[i][0].transcript} (Confidence: ${typeof confidence === 'number' ? confidence.toFixed(2) : 'n/a'})`);
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
} else {
interim_transcript += event.results[i][0].transcript;
}
}
const resultText = final_transcript || interim_transcript;
resultDiv.textContent = resultText;
                // Update the status display
if (resultText) {
updateStatus(`🎤 正在识别... (已捕捉到语音)`);
}
if (ws.readyState === WebSocket.OPEN) {
console.log(`[Browser Page] Sending ${final_transcript ? 'final' : 'interim'} result to server:`, resultText);
ws.send(JSON.stringify({ type: 'result', data: { text: resultText, isFinal: !!final_transcript } }));
}
};
recognition.onerror = (event) => {
console.error(`[Browser Page] SpeechRecognition Error - Type: ${event.error}, Message: ${event.message}`);
                // Map error types to friendlier messages
let errorMessage = '';
switch (event.error) {
case 'no-speech':
errorMessage = '未检测到语音,请确保麦克风工作正常并尝试说话。';
                        // Try to restart recognition automatically
setTimeout(() => {
if (recognition) {
try {
recognition.start();
console.log('[Browser Page] Restarting recognition after no-speech error');
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
}
}
}, 1000);
break;
case 'audio-capture':
errorMessage = '无法捕获音频,请确保麦克风已连接并已授权。';
break;
case 'not-allowed':
errorMessage = '浏览器不允许使用麦克风,请检查权限设置。';
break;
case 'network':
errorMessage = '网络错误导致语音识别失败。';
break;
case 'aborted':
errorMessage = '语音识别被用户或系统中止。';
break;
default:
errorMessage = `识别错误: ${event.error}`;
}
updateStatus(`错误: ${errorMessage}`);
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({
type: 'error',
data: {
error: event.error,
message: errorMessage || event.message || `Recognition error: ${event.error}`
}
}));
}
};
recognition.onend = () => {
console.log('[Browser Page] SpeechRecognition ended.');
                // Check whether recognition ended because of an error or a manual stop (inferred from the status text)
const isErrorOrStopped = statusDiv.textContent.includes('错误') || statusDiv.textContent.includes('停止');
if (!isErrorOrStopped) {
                    // If it was neither an error nor a manual stop, restart recognition automatically
                    updateStatus("识别暂停,正在重新启动...");
                    // Keep a reference to the current recognition object
                    const currentRecognition = recognition;
                    // Try to restart recognition
setTimeout(() => {
try {
if (currentRecognition && currentRecognition === recognition) {
currentRecognition.start();
console.log('[Browser Page] Automatically restarting recognition');
} else {
                                // If the recognition object has changed, create a new one
setupRecognition();
if (recognition) {
recognition.start();
console.log('[Browser Page] Created new recognition instance and started');
}
}
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
updateStatus("识别已停止。等待指令...");
}
}, 300);
} else {
updateStatus("识别已停止。等待指令...");
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'stopped' }));
}
                    // Only reset the recognition object on manual stop or error
recognition = null;
}
};
return true;
}
function startRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:浏览器不支持 Web Speech API。');
return;
}
            // Show that we are preparing the microphone
updateStatus('正在准备麦克风...');
if (recognition) {
console.log('[Browser Page] Recognition already exists, stopping first.');
stopRecognition();
}
if (!setupRecognition()) return;
console.log('[Browser Page] Attempting to start recognition...');
try {
                // Use a longer timeout so there is enough time to grant microphone permission
const micPermissionTimeout = setTimeout(() => {
updateStatus('获取麦克风权限超时,请刷新页面重试。');
                }, 10000); // 10-second timeout
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true
}
})
.then(stream => {
clearTimeout(micPermissionTimeout);
console.log('[Browser Page] Microphone access granted.');
            // Check the microphone input level once before recognition starts
            const audioContext = new AudioContext();
            const analyser = audioContext.createAnalyser();
            const microphone = audioContext.createMediaStreamSource(stream);
            const javascriptNode = audioContext.createScriptProcessor(2048, 1, 1); // deprecated API, but still widely supported
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
javascriptNode.onaudioprocess = function () {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += (array[i]);
}
const average = values / length;
console.log('[Browser Page] Microphone volume level:', average);
                // If the volume is too low, show a hint
if (average < 5) {
updateStatus('麦克风音量很低,请说话或检查麦克风设置。');
} else {
updateStatus('🎤 正在识别...');
}
                // Check only once, then disconnect the audio graph
microphone.disconnect();
analyser.disconnect();
javascriptNode.disconnect();
};
            // Release the test audio stream
setTimeout(() => {
stream.getTracks().forEach(track => track.stop());
audioContext.close();
}, 1000);
            // Start speech recognition
if (recognition) {
recognition.start();
updateStatus('🎤 正在识别...');
} else {
updateStatus('错误Recognition 实例丢失。');
console.error('[Browser Page] Recognition instance lost before start.');
}
})
.catch(err => {
clearTimeout(micPermissionTimeout);
console.error('[Browser Page] Microphone access error:', err);
let errorMsg = `无法访问麦克风 (${err.name})`;
if (err.name === 'NotAllowedError') {
errorMsg = '麦克风访问被拒绝。请在浏览器设置中允许麦克风访问权限。';
} else if (err.name === 'NotFoundError') {
errorMsg = '未找到麦克风设备。请确保麦克风已连接。';
}
updateStatus(`错误: ${errorMsg}`);
recognition = null;
});
} catch (e) {
console.error('[Browser Page] Error calling recognition.start():', e);
updateStatus(`启动识别时出错: ${e.message}`);
recognition = null;
}
}
function stopRecognition() {
if (recognition) {
console.log('[Browser Page] Stopping recognition...');
updateStatus("正在停止识别...");
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error calling recognition.stop():', e);
recognition = null;
updateStatus("停止时出错,已强制重置。");
}
} else {
console.log('[Browser Page] Recognition not active, nothing to stop.');
updateStatus("识别未运行。");
}
}
function forceResetRecognition() {
console.log('[Browser Page] Force resetting recognition...');
updateStatus("强制重置语音识别...");
            // First try to stop the current recognition
if (recognition) {
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error stopping recognition during reset:', e);
}
}
            // Force the reference to null so any late results are discarded
recognition = null;
            // Notify the server that the reset is complete
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'reset_complete' }));
}
updateStatus("语音识别已重置,等待新指令。");
}
</script>
</body>
</html>
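
The volume check in the page above relies on createScriptProcessor, which is deprecated. Below is a sketch of an equivalent one-shot level check built on AnalyserNode instead (browser JavaScript); the threshold of 5 is carried over from the page, while the 250 ms warm-up is an arbitrary choice for this sketch.

// sketch: one-shot microphone level check without the deprecated ScriptProcessorNode
function checkMicLevel(stream, onLevel) {
  const ctx = new AudioContext()
  const analyser = ctx.createAnalyser()
  analyser.fftSize = 1024
  analyser.smoothingTimeConstant = 0.8
  ctx.createMediaStreamSource(stream).connect(analyser)
  const bins = new Uint8Array(analyser.frequencyBinCount)
  // Sample once after a short warm-up so the analyser holds real data
  setTimeout(() => {
    analyser.getByteFrequencyData(bins)
    const average = bins.reduce((sum, v) => sum + v, 0) / bins.length
    onLevel(average)
    ctx.close()
  }, 250)
}

// usage mirroring the page: warn below the same threshold of 5
// checkMicLevel(stream, (avg) => updateStatus(avg < 5 ? '麦克风音量很低,请说话或检查麦克风设置。' : '🎤 正在识别...'))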

854
public/asr-server/package-lock.json generated Normal file

@ -0,0 +1,854 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cherry-asr-server",
"version": "1.0.0",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"license": "MIT",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"license": "MIT"
},
"node_modules/body-parser": {
"version": "1.20.3",
"resolved": "https://registry.npmmirror.com/body-parser/-/body-parser-1.20.3.tgz",
"integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.13.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/call-bind-apply-helpers": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/call-bound": {
"version": "1.0.4",
"resolved": "https://registry.npmmirror.com/call-bound/-/call-bound-1.0.4.tgz",
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"get-intrinsic": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmmirror.com/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie": {
"version": "0.7.1",
"resolved": "https://registry.npmmirror.com/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmmirror.com/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
"license": "MIT"
},
"node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"license": "MIT",
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"es-errors": "^1.3.0",
"gopd": "^1.2.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT"
},
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-2.0.0.tgz",
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-object-atoms": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
"license": "MIT"
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/express": {
"version": "4.21.2",
"resolved": "https://registry.npmmirror.com/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.3",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.7.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.3.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.12",
"proxy-addr": "~2.0.7",
"qs": "6.13.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.19.0",
"serve-static": "1.16.2",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/finalhandler": {
"version": "1.3.1",
"resolved": "https://registry.npmmirror.com/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
"es-object-atoms": "^1.1.1",
"function-bind": "^1.1.2",
"get-proto": "^1.0.1",
"gopd": "^1.2.0",
"has-symbols": "^1.1.0",
"hasown": "^2.0.2",
"math-intrinsics": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz",
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
"license": "MIT",
"dependencies": {
"dunder-proto": "^1.0.1",
"es-object-atoms": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"license": "MIT",
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"license": "ISC"
},
"node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"license": "MIT",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmmirror.com/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"license": "MIT",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/object-inspect": {
"version": "1.13.4",
"resolved": "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.13.4.tgz",
"integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmmirror.com/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"license": "MIT",
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmmirror.com/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmmirror.com/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"license": "MIT",
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmmirror.com/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.0.6"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.2",
"resolved": "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
"node_modules/send": {
"version": "0.19.0",
"resolved": "https://registry.npmmirror.com/send/-/send-0.19.0.tgz",
"integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/serve-static": {
"version": "1.16.2",
"resolved": "https://registry.npmmirror.com/serve-static/-/serve-static-1.16.2.tgz",
"integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
"license": "MIT",
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.19.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"license": "ISC"
},
"node_modules/side-channel": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/side-channel/-/side-channel-1.1.0.tgz",
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3",
"side-channel-list": "^1.0.0",
"side-channel-map": "^1.0.1",
"side-channel-weakmap": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-list": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/side-channel-list/-/side-channel-list-1.0.0.tgz",
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-map": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/side-channel-map/-/side-channel-map-1.0.1.tgz",
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-weakmap": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3",
"side-channel-map": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"license": "MIT",
"engines": {
"node": ">=0.6"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmmirror.com/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"license": "MIT",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/ws": {
"version": "8.18.1",
"resolved": "https://registry.npmmirror.com/ws/-/ws-8.18.1.tgz",
"integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
}
}
}


@ -0,0 +1,10 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"description": "Cherry Studio ASR Server",
"main": "server.js",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
}

269
public/asr-server/server.js Normal file

@ -0,0 +1,269 @@
// Dependency check
try {
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
  // Check the required dependencies
  const checkDependency = (name) => {
    try {
      require(name) // load only to verify availability; the return value is unused
console.log(`Successfully loaded dependency: ${name}`)
return true
} catch (error) {
console.error(`Failed to load dependency: ${name}`, error.message)
return false
}
}
  // Check all required dependencies
const dependencies = ['http', 'ws', 'express', 'path', 'fs']
const missingDeps = dependencies.filter((dep) => !checkDependency(dep))
if (missingDeps.length > 0) {
console.error(`Missing dependencies: ${missingDeps.join(', ')}. Server cannot start.`)
process.exit(1)
}
} catch (error) {
console.error('Error during dependency check:', error)
process.exit(1)
}
// Load dependencies
const http = require('http')
const WebSocket = require('ws')
const express = require('express')
const path = require('path')
const fs = require('fs') // hoisted here instead of being re-required in each function below
const app = express()
const port = 34515 // Define the port
// Resolve the path to the index.html file
function getIndexHtmlPath() {
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
  // Try several possible locations
  const possiblePaths = [
    // development path
    path.join(__dirname, 'index.html'),
    // current working directory
    path.join(process.cwd(), 'index.html'),
    // next to the executable
    path.join(path.dirname(process.execPath), 'index.html'),
    // parent directory of the executable
    path.join(path.dirname(path.dirname(process.execPath)), 'index.html'),
    // resources directory next to the executable
    path.join(path.dirname(process.execPath), 'resources', 'index.html'),
    // resources/asr-server directory next to the executable
    path.join(path.dirname(process.execPath), 'resources', 'asr-server', 'index.html'),
    // asr-server directory next to the executable
    path.join(path.dirname(process.execPath), 'asr-server', 'index.html'),
    // pkg-packaged environment
    process.pkg ? path.join(path.dirname(process.execPath), 'index.html') : null
  ].filter(Boolean) // drop the null entries
console.log('Possible index.html paths:', possiblePaths)
  // Check each path and return the first file that exists
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
console.log(`Found index.html at: ${p}`)
return p
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
  // If no file was found, log an error and return the default path
  console.error('Could not find index.html in any of the expected locations')
  return path.join(__dirname, 'index.html') // default path, even though it may not exist
}
// Serve the page to the browser
app.get('/', (req, res) => {
try {
const indexPath = getIndexHtmlPath()
console.log(`Serving index.html from: ${indexPath}`)
    // Verify the file exists before sending it
if (!fs.existsSync(indexPath)) {
console.error(`Error: index.html not found at ${indexPath}`)
return res.status(404).send(`Error: index.html not found at ${indexPath}. <br>Please check the server logs.`)
}
res.sendFile(indexPath, (err) => {
if (err) {
console.error('Error sending index.html:', err)
res.status(500).send(`Error serving index.html: ${err.message}`)
}
})
} catch (error) {
console.error('Error in route handler:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
const server = http.createServer(app)
const wss = new WebSocket.Server({ server })
let browserConnection = null
let electronConnection = null
wss.on('connection', (ws) => {
console.log('[Server] WebSocket client connected') // Add log
ws.on('message', (message) => {
let data
try {
// Ensure message is treated as string before parsing
data = JSON.parse(message.toString())
console.log('[Server] Received message:', data) // Log parsed data
} catch (e) {
console.error('[Server] Failed to parse message or message is not JSON:', message.toString(), e)
return // Ignore non-JSON messages
}
    // Identify the client type
if (data.type === 'identify') {
if (data.role === 'browser') {
browserConnection = ws
console.log('[Server] Browser identified and connected')
// Notify Electron that the browser is ready
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent browser_ready status to Electron')
}
// Notify Electron if it's already connected
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connected' }))
}
ws.on('close', () => {
console.log('[Server] Browser disconnected')
browserConnection = null
// Notify Electron
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser disconnected' }))
}
})
ws.on('error', (error) => {
console.error('[Server] Browser WebSocket error:', error)
browserConnection = null // Assume disconnected on error
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
}
})
} else if (data.role === 'electron') {
electronConnection = ws
console.log('[Server] Electron identified and connected')
// If browser is already connected when Electron connects, notify Electron immediately
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent initial browser_ready status to Electron')
}
ws.on('close', () => {
console.log('[Server] Electron disconnected')
electronConnection = null
// Maybe send stop to browser if electron disconnects?
// if (browserConnection) browserConnection.send(JSON.stringify({ type: 'stop' }));
})
ws.on('error', (error) => {
console.error('[Server] Electron WebSocket error:', error)
electronConnection = null // Assume disconnected on error
})
}
}
    // Electron controls start/stop
else if (data.type === 'start' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying START command to browser')
browserConnection.send(JSON.stringify({ type: 'start' }))
} else {
console.log('[Server] Cannot relay START: Browser not connected')
// Optionally notify Electron back
electronConnection.send(JSON.stringify({ type: 'error', message: 'Browser not connected for ASR' }))
}
} else if (data.type === 'stop' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STOP command to browser')
browserConnection.send(JSON.stringify({ type: 'stop' }))
} else {
console.log('[Server] Cannot relay STOP: Browser not connected')
}
} else if (data.type === 'reset' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying RESET command to browser')
browserConnection.send(JSON.stringify({ type: 'reset' }))
} else {
console.log('[Server] Cannot relay RESET: Browser not connected')
}
}
    // The browser sends recognition results
else if (data.type === 'result' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
// console.log('[Server] Relaying RESULT to Electron:', data.data); // Log less frequently if needed
electronConnection.send(JSON.stringify({ type: 'result', data: data.data }))
} else {
// console.log('[Server] Cannot relay RESULT: Electron not connected');
}
}
    // The browser sends status updates (e.g. 'stopped')
else if (data.type === 'status' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STATUS to Electron:', data.message) // Log status being relayed
electronConnection.send(JSON.stringify({ type: 'status', message: data.message }))
} else {
console.log('[Server] Cannot relay STATUS: Electron not connected')
}
} else {
console.log('[Server] Received unknown message type or from unknown source:', data)
}
})
ws.on('error', (error) => {
// Generic error handling for connection before identification
console.error('[Server] Initial WebSocket connection error:', error)
// Attempt to clean up based on which connection it might be (if identified)
if (ws === browserConnection) {
browserConnection = null
if (electronConnection)
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
} else if (ws === electronConnection) {
electronConnection = null
}
})
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; give the log output time to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}
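
The relay protocol above can be exercised without a real browser or Electron client: connect two ws clients, identify one as each role, and confirm that a start command and a fabricated result round-trip. A loopback test sketch, assuming server.js above is already listening on 34515; this test file is not part of the commit.

// sketch: loopback test of the relay (run while server.js is up)
const WebSocket = require('ws')

const browser = new WebSocket('ws://localhost:34515')
const electron = new WebSocket('ws://localhost:34515')

browser.on('open', () => browser.send(JSON.stringify({ type: 'identify', role: 'browser' })))
electron.on('open', () => electron.send(JSON.stringify({ type: 'identify', role: 'electron' })))

browser.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'start') {
    // Pretend the Web Speech API produced a final result
    browser.send(JSON.stringify({ type: 'result', data: { text: 'hello', isFinal: true } }))
  }
})

electron.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'status' && msg.message === 'browser_ready') {
    electron.send(JSON.stringify({ type: 'start' }))
  } else if (msg.type === 'result') {
    console.log('relay OK:', msg.data)
    browser.close()
    electron.close()
  }
})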


@ -0,0 +1,114 @@
/**
 * Standalone ASR server.
 * This file is a simplified version of server.js, meant to run inside the packaged application.
 */
// Basic dependencies
const http = require('http')
const express = require('express')
const path = require('path')
const fs = require('fs')
// Log environment information
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the Express application
const app = express()
const port = 34515
// Serve static files
app.use(express.static(__dirname))
// Serve the page to the browser
app.get('/', (req, res) => {
try {
    // Try several possible locations
    const possiblePaths = [
      // current directory
      path.join(__dirname, 'index.html'),
      // parent directory
      path.join(__dirname, '..', 'index.html'),
      // application root directory
      path.join(process.cwd(), 'index.html')
]
console.log('Possible index.html paths:', possiblePaths)
    // Use the first file that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
res.sendFile(indexPath)
} else {
      // If the file cannot be found, serve a simple fallback HTML page
console.error('Could not find index.html, serving fallback page')
res.send(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:${port}</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} catch (error) {
console.error('Error serving index.html:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
// Create the HTTP server
const server = http.createServer(app)
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; give the log output time to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
  // Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}
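
None of the server variants in this commit handle termination signals; they only exit on errors. Below is a graceful-shutdown sketch that could sit at the bottom of any of them; it assumes the `server` variable from the file above, and the 3-second grace period is arbitrary.

// sketch: close the HTTP server cleanly on SIGINT/SIGTERM
function shutdown(signal) {
  console.log(`[Server] Received ${signal}, shutting down...`)
  server.close(() => process.exit(0))
  // Force exit if open connections keep the server from closing promptly
  setTimeout(() => process.exit(1), 3000).unref()
}
process.on('SIGINT', () => shutdown('SIGINT'))
process.on('SIGTERM', () => shutdown('SIGTERM'))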


@ -0,0 +1,5 @@
@echo off
echo Starting ASR Server...
cd /d %~dp0
node standalone.js
pause


@ -0,0 +1,123 @@
/**
 * Embedded ASR server module.
 * This file can run directly inside Electron and needs no external dependencies.
 */
// Use only the Node.js modules built into Electron
const http = require('http')
const path = require('path')
const fs = require('fs')
// Log environment information
console.log('ASR Server (Embedded) starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the HTTP server
const server = http.createServer((req, res) => {
try {
if (req.url === '/' || req.url === '/index.html') {
      // Try several possible locations
      const possiblePaths = [
        // current directory
        path.join(__dirname, 'index.html'),
        // parent directory
        path.join(__dirname, '..', 'index.html'),
        // application root directory
        path.join(process.cwd(), 'index.html')
]
console.log('Possible index.html paths:', possiblePaths)
      // Use the first file that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
        // Read the file contents and send them
fs.readFile(indexPath, (err, data) => {
if (err) {
console.error('Error reading index.html:', err)
res.writeHead(500)
res.end('Error reading index.html')
return
}
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(data)
})
} else {
        // If the file cannot be found, serve a simple fallback HTML page
console.error('Could not find index.html, serving fallback page')
res.writeHead(200, { 'Content-Type': 'text/html' })
res.end(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:34515</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} else {
      // Handle all other requests
res.writeHead(404)
res.end('Not found')
}
} catch (error) {
console.error('Error handling request:', error)
res.writeHead(500)
res.end('Server error')
}
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
const port = 34515
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
  // Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}

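A minimal sketch for verifying that the embedded server is reachable, assuming it is already running; the port 34515 is the constant from the code above, while the file name and the rest of the wiring are illustrative:

// probe.ts — check that the embedded ASR server answers on its fixed port
import http from 'node:http'

http
  .get('http://localhost:34515/', (res) => {
    console.log('ASR server responded with status', res.statusCode)
    res.resume() // drain the body; only reachability matters here
  })
  .on('error', (err) => {
    console.error('ASR server is not reachable:', err.message)
  })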
425
resources/asr-server/index.html Normal file
View File

@ -0,0 +1,425 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Cherry Studio ASR</title>
<style>
body {
font-family: sans-serif;
padding: 1em;
}
#status {
margin-top: 1em;
font-style: italic;
color: #555;
}
#result {
margin-top: 0.5em;
border: 1px solid #ccc;
padding: 0.5em;
min-height: 50px;
background: #f9f9f9;
}
</style>
</head>
<body>
<h1>浏览器语音识别中继页面</h1>
<p>这个页面需要在浏览器中保持打开,以便应用使用其语音识别功能。</p>
<div id="status">正在连接到服务器...</div>
<div id="result"></div>
<script>
const statusDiv = document.getElementById('status');
const resultDiv = document.getElementById('result');
    // Try to connect to the WebSocket server
    let ws;
    let reconnectAttempts = 0;
    const maxReconnectAttempts = 5;
    const reconnectInterval = 2000; // 2 seconds
function connectWebSocket() {
try {
ws = new WebSocket('ws://localhost:34515');
ws.onopen = () => {
reconnectAttempts = 0;
updateStatus('已连接到服务器,等待指令...');
ws.send(JSON.stringify({ type: 'identify', role: 'browser' }));
};
ws.onmessage = handleMessage;
ws.onerror = (error) => {
console.error('[Browser Page] WebSocket Error:', error);
updateStatus('WebSocket 连接错误!请检查服务器是否运行。');
};
ws.onclose = () => {
console.log('[Browser Page] WebSocket Connection Closed');
updateStatus('与服务器断开连接。尝试重新连接...');
stopRecognition();
          // Try to reconnect
if (reconnectAttempts < maxReconnectAttempts) {
reconnectAttempts++;
updateStatus(`与服务器断开连接。尝试重新连接 (${reconnectAttempts}/${maxReconnectAttempts})...`);
setTimeout(connectWebSocket, reconnectInterval);
} else {
updateStatus('无法连接到服务器。请刷新页面或重启应用。');
}
};
} catch (error) {
console.error('[Browser Page] Error creating WebSocket:', error);
updateStatus('创建WebSocket连接时出错。请刷新页面或重启应用。');
}
}
    // Initial connection
connectWebSocket();
let recognition = null;
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
function updateStatus(message) {
console.log(`[Browser Page Status] ${message}`);
statusDiv.textContent = message;
}
function handleMessage(event) {
let data;
try {
data = JSON.parse(event.data);
console.log('[Browser Page] Received command:', data);
} catch (e) {
console.error('[Browser Page] Received non-JSON message:', event.data);
return;
}
if (data.type === 'start') {
startRecognition();
} else if (data.type === 'stop') {
stopRecognition();
} else if (data.type === 'reset') {
        // Force-reset speech recognition
forceResetRecognition();
} else {
console.warn('[Browser Page] Received unknown command type:', data.type);
}
};
function setupRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:此浏览器不支持 Web Speech API。');
return false;
}
if (recognition && recognition.recognizing) {
console.log('[Browser Page] Recognition already active.');
return true;
}
recognition = new SpeechRecognition();
recognition.lang = 'zh-CN';
recognition.continuous = true;
recognition.interimResults = true;
      // The following settings improve speech-recognition reliability
      recognition.maxAlternatives = 3; // Return multiple candidate results
      // Use a shorter recognition window so the user sees results sooner
      // Note: this property is non-standard and may not be supported by all browsers
      try {
        // @ts-ignore
        recognition.audioStart = 0.1; // Try a lower starting volume threshold
      } catch (e) {
        console.log('[Browser Page] audioStart property not supported');
}
recognition.onstart = () => {
updateStatus("🎤 正在识别...");
console.log('[Browser Page] SpeechRecognition started.');
};
recognition.onresult = (event) => {
console.log('[Browser Page] Recognition result event:', event);
let interim_transcript = '';
let final_transcript = '';
        // Log detailed recognition results for debugging
for (let i = event.resultIndex; i < event.results.length; ++i) {
const confidence = event.results[i][0].confidence;
console.log(`[Browser Page] Result ${i}: ${event.results[i][0].transcript} (Confidence: ${confidence.toFixed(2)})`);
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
} else {
interim_transcript += event.results[i][0].transcript;
}
}
const resultText = final_transcript || interim_transcript;
resultDiv.textContent = resultText;
        // Update the status display
if (resultText) {
updateStatus(`🎤 正在识别... (已捕捉到语音)`);
}
if (ws.readyState === WebSocket.OPEN) {
console.log(`[Browser Page] Sending ${final_transcript ? 'final' : 'interim'} result to server:`, resultText);
ws.send(JSON.stringify({ type: 'result', data: { text: resultText, isFinal: !!final_transcript } }));
}
};
recognition.onerror = (event) => {
console.error(`[Browser Page] SpeechRecognition Error - Type: ${event.error}, Message: ${event.message}`);
        // Provide a friendlier message based on the error type
let errorMessage = '';
switch (event.error) {
case 'no-speech':
errorMessage = '未检测到语音,请确保麦克风工作正常并尝试说话。';
            // Try to restart recognition
setTimeout(() => {
if (recognition) {
try {
recognition.start();
console.log('[Browser Page] Restarting recognition after no-speech error');
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
}
}
}, 1000);
break;
case 'audio-capture':
errorMessage = '无法捕获音频,请确保麦克风已连接并已授权。';
break;
case 'not-allowed':
errorMessage = '浏览器不允许使用麦克风,请检查权限设置。';
break;
case 'network':
errorMessage = '网络错误导致语音识别失败。';
break;
case 'aborted':
errorMessage = '语音识别被用户或系统中止。';
break;
default:
errorMessage = `识别错误: ${event.error}`;
}
updateStatus(`错误: ${errorMessage}`);
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({
type: 'error',
data: {
error: event.error,
message: errorMessage || event.message || `Recognition error: ${event.error}`
}
}));
}
};
recognition.onend = () => {
console.log('[Browser Page] SpeechRecognition ended.');
        // Check whether recognition ended because of an error or a manual stop
        const isErrorOrStopped = statusDiv.textContent.includes('错误') || statusDiv.textContent.includes('停止');
        if (!isErrorOrStopped) {
          // If not, restart recognition automatically
          updateStatus("识别暂停,正在重新启动...");
          // Keep a reference to the current recognition object
          const currentRecognition = recognition;
          // Try to restart recognition
setTimeout(() => {
try {
if (currentRecognition && currentRecognition === recognition) {
currentRecognition.start();
console.log('[Browser Page] Automatically restarting recognition');
} else {
                // If the recognition object has changed, create a new one
setupRecognition();
if (recognition) {
recognition.start();
console.log('[Browser Page] Created new recognition instance and started');
}
}
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
updateStatus("识别已停止。等待指令...");
}
}, 300);
} else {
updateStatus("识别已停止。等待指令...");
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'stopped' }));
}
          // Only reset the recognition object on manual stop or error
recognition = null;
}
};
return true;
}
function startRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:浏览器不支持 Web Speech API。');
return;
}
      // Show the preparing state
updateStatus('正在准备麦克风...');
if (recognition) {
console.log('[Browser Page] Recognition already exists, stopping first.');
stopRecognition();
}
if (!setupRecognition()) return;
console.log('[Browser Page] Attempting to start recognition...');
try {
        // Use a longer timeout so there is enough time to obtain microphone permission
        const micPermissionTimeout = setTimeout(() => {
          updateStatus('获取麦克风权限超时,请刷新页面重试。');
        }, 10000); // 10-second timeout
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true
}
})
.then(stream => {
clearTimeout(micPermissionTimeout);
console.log('[Browser Page] Microphone access granted.');
            // Check the microphone volume level
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
const microphone = audioContext.createMediaStreamSource(stream);
const javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
javascriptNode.onaudioprocess = function () {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += (array[i]);
}
const average = values / length;
console.log('[Browser Page] Microphone volume level:', average);
              // If the volume is too low, show a hint
if (average < 5) {
updateStatus('麦克风音量很低,请说话或检查麦克风设置。');
} else {
updateStatus('🎤 正在识别...');
}
              // Check once, then disconnect
microphone.disconnect();
analyser.disconnect();
javascriptNode.disconnect();
};
            // Release the test audio stream
setTimeout(() => {
stream.getTracks().forEach(track => track.stop());
audioContext.close();
}, 1000);
            // Start speech recognition
if (recognition) {
recognition.start();
updateStatus('🎤 正在识别...');
} else {
updateStatus('错误Recognition 实例丢失。');
console.error('[Browser Page] Recognition instance lost before start.');
}
})
.catch(err => {
clearTimeout(micPermissionTimeout);
console.error('[Browser Page] Microphone access error:', err);
let errorMsg = `无法访问麦克风 (${err.name})`;
if (err.name === 'NotAllowedError') {
errorMsg = '麦克风访问被拒绝。请在浏览器设置中允许麦克风访问权限。';
} else if (err.name === 'NotFoundError') {
errorMsg = '未找到麦克风设备。请确保麦克风已连接。';
}
updateStatus(`错误: ${errorMsg}`);
recognition = null;
});
} catch (e) {
console.error('[Browser Page] Error calling recognition.start():', e);
updateStatus(`启动识别时出错: ${e.message}`);
recognition = null;
}
}
function stopRecognition() {
if (recognition) {
console.log('[Browser Page] Stopping recognition...');
updateStatus("正在停止识别...");
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error calling recognition.stop():', e);
recognition = null;
updateStatus("停止时出错,已强制重置。");
}
} else {
console.log('[Browser Page] Recognition not active, nothing to stop.');
updateStatus("识别未运行。");
}
}
function forceResetRecognition() {
console.log('[Browser Page] Force resetting recognition...');
updateStatus("强制重置语音识别...");
      // First try to stop the current recognition
if (recognition) {
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error stopping recognition during reset:', e);
}
}
      // Force it to null so all subsequent results are discarded
      recognition = null;
      // Notify the server that the reset is complete
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'reset_complete' }));
}
updateStatus("语音识别已重置,等待新指令。");
}
</script>
</body>
</html>

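The page above is one half of a small WebSocket relay protocol: each client identifies itself with { type: 'identify', role }, the Electron side issues start/stop/reset commands, and this page answers with result, status, and error messages. A minimal sketch of the Electron-side counterpart, assuming the ws package from the lockfile below; the message shapes come from this page, the rest of the wiring is illustrative:

// relay-client.ts — hedged sketch of an Electron-side client for the relay protocol
import WebSocket from 'ws'

const ws = new WebSocket('ws://localhost:34515')

ws.on('open', () => {
  // Identify as the Electron side, mirroring the page's identify message
  ws.send(JSON.stringify({ type: 'identify', role: 'electron' }))
})

ws.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'status' && msg.message === 'browser_ready') {
    // The relay reports that the browser page is connected; start recognizing
    ws.send(JSON.stringify({ type: 'start' }))
  } else if (msg.type === 'result') {
    console.log(msg.data.isFinal ? 'final:' : 'interim:', msg.data.text)
  }
})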
854
resources/asr-server/package-lock.json generated Normal file
View File

@ -0,0 +1,854 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cherry-asr-server",
"version": "1.0.0",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmmirror.com/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"license": "MIT",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"license": "MIT"
},
"node_modules/body-parser": {
"version": "1.20.3",
"resolved": "https://registry.npmmirror.com/body-parser/-/body-parser-1.20.3.tgz",
"integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.13.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/call-bind-apply-helpers": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/call-bound": {
"version": "1.0.4",
"resolved": "https://registry.npmmirror.com/call-bound/-/call-bound-1.0.4.tgz",
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"get-intrinsic": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmmirror.com/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmmirror.com/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie": {
"version": "0.7.1",
"resolved": "https://registry.npmmirror.com/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmmirror.com/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
"license": "MIT"
},
"node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmmirror.com/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"license": "MIT",
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"es-errors": "^1.3.0",
"gopd": "^1.2.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT"
},
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-2.0.0.tgz",
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-object-atoms": {
"version": "1.1.1",
"resolved": "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
"license": "MIT"
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/express": {
"version": "4.21.2",
"resolved": "https://registry.npmmirror.com/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.3",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.7.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.3.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.12",
"proxy-addr": "~2.0.7",
"qs": "6.13.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.19.0",
"serve-static": "1.16.2",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/finalhandler": {
"version": "1.3.1",
"resolved": "https://registry.npmmirror.com/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmmirror.com/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
"es-object-atoms": "^1.1.1",
"function-bind": "^1.1.2",
"get-proto": "^1.0.1",
"gopd": "^1.2.0",
"has-symbols": "^1.1.0",
"hasown": "^2.0.2",
"math-intrinsics": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz",
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
"license": "MIT",
"dependencies": {
"dunder-proto": "^1.0.1",
"es-object-atoms": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"license": "MIT",
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"license": "ISC"
},
"node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmmirror.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"license": "MIT",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmmirror.com/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmmirror.com/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmmirror.com/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"license": "MIT",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmmirror.com/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/object-inspect": {
"version": "1.13.4",
"resolved": "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.13.4.tgz",
"integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmmirror.com/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"license": "MIT",
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmmirror.com/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmmirror.com/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"license": "MIT",
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmmirror.com/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.0.6"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmmirror.com/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.2",
"resolved": "https://registry.npmmirror.com/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
"node_modules/send": {
"version": "0.19.0",
"resolved": "https://registry.npmmirror.com/send/-/send-0.19.0.tgz",
"integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/serve-static": {
"version": "1.16.2",
"resolved": "https://registry.npmmirror.com/serve-static/-/serve-static-1.16.2.tgz",
"integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
"license": "MIT",
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.19.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"license": "ISC"
},
"node_modules/side-channel": {
"version": "1.1.0",
"resolved": "https://registry.npmmirror.com/side-channel/-/side-channel-1.1.0.tgz",
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3",
"side-channel-list": "^1.0.0",
"side-channel-map": "^1.0.1",
"side-channel-weakmap": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-list": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/side-channel-list/-/side-channel-list-1.0.0.tgz",
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-map": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/side-channel-map/-/side-channel-map-1.0.1.tgz",
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-weakmap": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3",
"side-channel-map": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"license": "MIT",
"engines": {
"node": ">=0.6"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmmirror.com/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"license": "MIT",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/ws": {
"version": "8.18.1",
"resolved": "https://registry.npmmirror.com/ws/-/ws-8.18.1.tgz",
"integrity": "sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
}
}
}

10
resources/asr-server/package.json Normal file
View File

@ -0,0 +1,10 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"description": "Cherry Studio ASR Server",
"main": "server.js",
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
}
}

269
resources/asr-server/server.js Normal file
View File

@ -0,0 +1,269 @@
// Check dependencies
try {
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
  // Helper that checks whether a required dependency can be loaded
const checkDependency = (name) => {
try {
require(name) // Removed unused variable 'module'
console.log(`Successfully loaded dependency: ${name}`)
return true
} catch (error) {
console.error(`Failed to load dependency: ${name}`, error.message)
return false
}
}
  // Check all required dependencies
const dependencies = ['http', 'ws', 'express', 'path', 'fs']
const missingDeps = dependencies.filter((dep) => !checkDependency(dep))
if (missingDeps.length > 0) {
console.error(`Missing dependencies: ${missingDeps.join(', ')}. Server cannot start.`)
process.exit(1)
}
} catch (error) {
console.error('Error during dependency check:', error)
process.exit(1)
}
// Load dependencies
const http = require('http')
const WebSocket = require('ws')
const express = require('express')
const path = require('path') // Need path module
// const fs = require('fs') // Commented out unused import 'fs'
const app = express()
const port = 34515 // Define the port
// Resolve the path to index.html
function getIndexHtmlPath() {
  const fs = require('fs')
  console.log('Current directory:', __dirname)
  console.log('Current working directory:', process.cwd())
  // Try several possible paths
  const possiblePaths = [
    // Development path
    path.join(__dirname, 'index.html'),
    // Current working directory
    path.join(process.cwd(), 'index.html'),
    // Relative to the executable
    path.join(path.dirname(process.execPath), 'index.html'),
    // Relative to the executable's parent directory
    path.join(path.dirname(path.dirname(process.execPath)), 'index.html'),
    // Relative to the executable's resources directory
    path.join(path.dirname(process.execPath), 'resources', 'index.html'),
    // Relative to the executable's resources/asr-server directory
    path.join(path.dirname(process.execPath), 'resources', 'asr-server', 'index.html'),
    // Relative to the executable's asr-server directory
    path.join(path.dirname(process.execPath), 'asr-server', 'index.html'),
    // pkg-packaged environment
    process.pkg ? path.join(path.dirname(process.execPath), 'index.html') : null
  ].filter(Boolean) // Drop the null entries
  console.log('Possible index.html paths:', possiblePaths)
  // Check each path and return the first file that exists
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
console.log(`Found index.html at: ${p}`)
return p
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
  // If nothing was found, log an error and return the default path
  console.error('Could not find index.html in any of the expected locations')
  return path.join(__dirname, 'index.html') // Return the default path even though it may not exist
}
// Serve the page to the browser
app.get('/', (req, res) => {
try {
const indexPath = getIndexHtmlPath()
console.log(`Serving index.html from: ${indexPath}`)
    // Check that the file exists
const fs = require('fs')
if (!fs.existsSync(indexPath)) {
console.error(`Error: index.html not found at ${indexPath}`)
return res.status(404).send(`Error: index.html not found at ${indexPath}. <br>Please check the server logs.`)
}
res.sendFile(indexPath, (err) => {
if (err) {
console.error('Error sending index.html:', err)
res.status(500).send(`Error serving index.html: ${err.message}`)
}
})
} catch (error) {
console.error('Error in route handler:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
const server = http.createServer(app)
const wss = new WebSocket.Server({ server })
let browserConnection = null
let electronConnection = null
wss.on('connection', (ws) => {
console.log('[Server] WebSocket client connected') // Add log
ws.on('message', (message) => {
let data
try {
// Ensure message is treated as string before parsing
data = JSON.parse(message.toString())
console.log('[Server] Received message:', data) // Log parsed data
} catch (e) {
console.error('[Server] Failed to parse message or message is not JSON:', message.toString(), e)
return // Ignore non-JSON messages
}
    // Identify the client type
if (data.type === 'identify') {
if (data.role === 'browser') {
browserConnection = ws
console.log('[Server] Browser identified and connected')
// Notify Electron that the browser is ready
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent browser_ready status to Electron')
}
// Notify Electron if it's already connected
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connected' }))
}
ws.on('close', () => {
console.log('[Server] Browser disconnected')
browserConnection = null
// Notify Electron
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser disconnected' }))
}
})
ws.on('error', (error) => {
console.error('[Server] Browser WebSocket error:', error)
browserConnection = null // Assume disconnected on error
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
}
})
} else if (data.role === 'electron') {
electronConnection = ws
console.log('[Server] Electron identified and connected')
// If browser is already connected when Electron connects, notify Electron immediately
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent initial browser_ready status to Electron')
}
ws.on('close', () => {
console.log('[Server] Electron disconnected')
electronConnection = null
// Maybe send stop to browser if electron disconnects?
// if (browserConnection) browserConnection.send(JSON.stringify({ type: 'stop' }));
})
ws.on('error', (error) => {
console.error('[Server] Electron WebSocket error:', error)
electronConnection = null // Assume disconnected on error
})
}
}
    // Electron controls start/stop
else if (data.type === 'start' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying START command to browser')
browserConnection.send(JSON.stringify({ type: 'start' }))
} else {
console.log('[Server] Cannot relay START: Browser not connected')
// Optionally notify Electron back
electronConnection.send(JSON.stringify({ type: 'error', message: 'Browser not connected for ASR' }))
}
} else if (data.type === 'stop' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STOP command to browser')
browserConnection.send(JSON.stringify({ type: 'stop' }))
} else {
console.log('[Server] Cannot relay STOP: Browser not connected')
}
} else if (data.type === 'reset' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying RESET command to browser')
browserConnection.send(JSON.stringify({ type: 'reset' }))
} else {
console.log('[Server] Cannot relay RESET: Browser not connected')
}
}
    // Browser sends recognition results
else if (data.type === 'result' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
// console.log('[Server] Relaying RESULT to Electron:', data.data); // Log less frequently if needed
electronConnection.send(JSON.stringify({ type: 'result', data: data.data }))
} else {
// console.log('[Server] Cannot relay RESULT: Electron not connected');
}
}
    // Browser sends status updates (e.g. 'stopped')
else if (data.type === 'status' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STATUS to Electron:', data.message) // Log status being relayed
electronConnection.send(JSON.stringify({ type: 'status', message: data.message }))
} else {
console.log('[Server] Cannot relay STATUS: Electron not connected')
}
} else {
console.log('[Server] Received unknown message type or from unknown source:', data)
}
})
ws.on('error', (error) => {
// Generic error handling for connection before identification
console.error('[Server] Initial WebSocket connection error:', error)
// Attempt to clean up based on which connection it might be (if identified)
if (ws === browserConnection) {
browserConnection = null
if (electronConnection)
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
} else if (ws === electronConnection) {
electronConnection = null
}
})
})
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}

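Distilled, the relay above is a pairing table plus a routing rule: commands flow from Electron to the browser, results and status flow back. The sketch below restates that rule in isolation; it is a simplification of the handlers above, not code from the commit:

// relay-rule.ts — the server's routing logic, restated for clarity
type Role = 'browser' | 'electron'

function routeFor(senderRole: Role, messageType: string): Role | null {
  // Electron -> browser: control commands
  if (senderRole === 'electron' && ['start', 'stop', 'reset'].includes(messageType)) return 'browser'
  // Browser -> Electron: recognition results and status updates
  if (senderRole === 'browser' && ['result', 'status'].includes(messageType)) return 'electron'
  // Everything else is logged and dropped by the server
  return null
}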
114
resources/asr-server/standalone.js Normal file
View File

@ -0,0 +1,114 @@
/**
 * Standalone ASR server.
 * A simplified version of server.js intended to run inside the packaged app.
 */
// Core dependencies
const http = require('http')
const express = require('express')
const path = require('path')
const fs = require('fs')
// Print environment information
console.log('ASR Server starting...')
console.log('Node.js version:', process.version)
console.log('Current directory:', __dirname)
console.log('Current working directory:', process.cwd())
console.log('Command line arguments:', process.argv)
// Create the Express app
const app = express()
const port = 34515
// Serve static files
app.use(express.static(__dirname))
// Serve the page to the browser
app.get('/', (req, res) => {
try {
    // Try several possible paths
    const possiblePaths = [
      // Current directory
      path.join(__dirname, 'index.html'),
      // Parent directory
      path.join(__dirname, '..', 'index.html'),
      // Application root directory
      path.join(process.cwd(), 'index.html')
    ]
    console.log('Possible index.html paths:', possiblePaths)
    // Use the first path that exists
let indexPath = null
for (const p of possiblePaths) {
try {
if (fs.existsSync(p)) {
indexPath = p
console.log(`Found index.html at: ${p}`)
break
}
} catch (e) {
console.error(`Error checking existence of ${p}:`, e)
}
}
if (indexPath) {
res.sendFile(indexPath)
} else {
      // If the file cannot be found, serve a simple fallback HTML page
console.error('Could not find index.html, serving fallback page')
res.send(`
<!DOCTYPE html>
<html>
<head>
<title>ASR Server</title>
<style>
body { font-family: sans-serif; padding: 2em; }
h1 { color: #333; }
</style>
</head>
<body>
<h1>ASR Server is running</h1>
<p>This is a fallback page because the index.html file could not be found.</p>
<p>Server is running at: http://localhost:${port}</p>
<p>Current directory: ${__dirname}</p>
<p>Working directory: ${process.cwd()}</p>
</body>
</html>
`)
}
} catch (error) {
console.error('Error serving index.html:', error)
res.status(500).send(`Server error: ${error.message}`)
}
})
// Create the HTTP server
const server = http.createServer(app)
// Add process-level error handling
process.on('uncaughtException', (error) => {
  console.error('[Server] Uncaught exception:', error)
  // Do not exit immediately; allow time for logs to flush
setTimeout(() => process.exit(1), 1000)
})
process.on('unhandledRejection', (reason, promise) => {
console.error('[Server] Unhandled rejection at:', promise, 'reason:', reason)
})
// Try to start the server
try {
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
  // Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
} catch (error) {
console.error('[Server] Critical error starting server:', error)
process.exit(1)
}

5
resources/asr-server/start.bat Normal file
View File

@ -0,0 +1,5 @@
@echo off
echo Starting ASR Server...
cd /d %~dp0
node standalone.js
pause

View File

@ -3,7 +3,7 @@ Object.defineProperty(exports, '__esModule', { value: true })
var fs = require('fs')
var path = require('path')
var translationsDir = path.join(__dirname, '../src/renderer/src/i18n/locales')
-var baseLocale = 'zh-CN'
+var baseLocale = 'zh-cn'
var baseFileName = ''.concat(baseLocale, '.json')
var baseFilePath = path.join(translationsDir, baseFileName)
/**

View File

@ -2,7 +2,7 @@ import * as fs from 'fs'
import * as path from 'path'
const translationsDir = path.join(__dirname, '../src/renderer/src/i18n/locales')
-const baseLocale = 'zh-CN'
+const baseLocale = 'zh-cn'
const baseFileName = `${baseLocale}.json`
const baseFilePath = path.join(translationsDir, baseFileName)

src/main/index.ts
View File

@ -50,6 +50,9 @@ if (!app.requestSingleInstanceLock()) {
registerIpc(mainWindow, app)
  // Note: the MsTTS IPC handlers are already registered in ipc.ts,
  // so there is no need to call registerMsTTSIpcHandlers() again
replaceDevtoolsFont(mainWindow)
if (process.env.NODE_ENV === 'development') {

src/main/ipc.ts
View File

@ -12,6 +12,7 @@ import log from 'electron-log'
import { titleBarOverlayDark, titleBarOverlayLight } from './config'
import AppUpdater from './services/AppUpdater'
import { asrServerService } from './services/ASRServerService'
import BackupManager from './services/BackupManager'
import { configManager } from './services/ConfigManager'
import CopilotService from './services/CopilotService'
@ -21,7 +22,11 @@ import FileStorage from './services/FileStorage'
import { GeminiService } from './services/GeminiService'
import KnowledgeService from './services/KnowledgeService'
import mcpService from './services/MCPService'
import { memoryFileService } from './services/MemoryFileService'
import * as MsTTSService from './services/MsTTSService'
import * as NutstoreService from './services/NutstoreService'
import ObsidianVaultService from './services/ObsidianVaultService'
import { ProxyConfig, proxyManager } from './services/ProxyManager'
@ -147,7 +152,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
})
)
await fileManager.clearTemp()
-      await fs.writeFileSync(log.transports.file.getFile().path, '')
+      fs.writeFileSync(log.transports.file.getFile().path, '')
return { success: true }
} catch (error: any) {
log.error('Failed to clear cache:', error)
@ -305,6 +310,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
)
// search window
ipcMain.handle(IpcChannel.SearchWindow_Open, async (_, uid: string) => {
await searchService.openSearchWindow(uid)
})
@ -331,4 +337,20 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ipcMain.handle(IpcChannel.LongTermMemory_SaveData, async (_, data, forceOverwrite = false) => {
return await memoryFileService.saveLongTermData(data, forceOverwrite)
})
  // Register the ASR server IPC handlers
  asrServerService.registerIpcHandlers()
  // Register the MsTTS IPC handlers
  ipcMain.handle(IpcChannel.MsTTS_GetVoices, MsTTSService.getVoices)
  ipcMain.handle(IpcChannel.MsTTS_Synthesize, (_, text: string, voice: string, outputFormat: string) =>
    MsTTSService.synthesize(text, voice, outputFormat)
  )
}

131
src/main/services/ASRServerService.ts Normal file
View File

@ -0,0 +1,131 @@
import { ChildProcess, spawn } from 'node:child_process'
import fs from 'node:fs'
import path from 'node:path'
import { IpcChannel } from '@shared/IpcChannel'
import { app, ipcMain } from 'electron'
import log from 'electron-log'
/**
 * ASR server service: manages the ASR server child process
 */
class ASRServerService {
private asrServerProcess: ChildProcess | null = null
  /**
   * Register the IPC handlers
   */
  public registerIpcHandlers(): void {
    // Start the ASR server
    ipcMain.handle(IpcChannel.Asr_StartServer, this.startServer.bind(this))
    // Stop the ASR server
    ipcMain.handle(IpcChannel.Asr_StopServer, this.stopServer.bind(this))
}
  /**
   * Start the ASR server
   * @returns Promise<{success: boolean, pid?: number, error?: string}>
   */
private async startServer(): Promise<{ success: boolean; pid?: number; error?: string }> {
try {
if (this.asrServerProcess) {
return { success: true, pid: this.asrServerProcess.pid }
}
      // Resolve the server file path
      log.info('App path:', app.getAppPath())
      // Use different paths in development and production
let serverPath = ''
const isPackaged = app.isPackaged
if (isPackaged) {
        // Production (packaged): use the path copied by extraResources
        // Note: 'app' is part of the 'to' field in the extraResources config
        serverPath = path.join(process.resourcesPath, 'app', 'asr-server', 'server.js')
        log.info('生产环境ASR 服务器路径:', serverPath)
      } else {
        // Development: point at asr-server in the project root
        serverPath = path.join(app.getAppPath(), 'asr-server', 'server.js')
        log.info('开发环境ASR 服务器路径:', serverPath)
}
      // Note: the isExeFile check was removed; we assume the server is always started with node
      // Removed unused variable 'isExeFile'
      log.info('ASR服务器路径:', serverPath)
      // Check that the file exists
      if (!fs.existsSync(serverPath)) {
        return { success: false, error: '服务器文件不存在' }
      }
      // Launch the server process
      // Always start server.js with node
      log.info(`尝试使用 node 启动: ${serverPath}`)
      this.asrServerProcess = spawn('node', [serverPath], {
        stdio: 'pipe', // 'pipe' captures output; switch to 'inherit' temporarily for debugging
        detached: false // false is usually sufficient here
})
      // Handle server output
this.asrServerProcess.stdout?.on('data', (data) => {
log.info(`[ASR Server] ${data.toString()}`)
})
this.asrServerProcess.stderr?.on('data', (data) => {
log.error(`[ASR Server Error] ${data.toString()}`)
})
      // Handle server exit
this.asrServerProcess.on('close', (code) => {
log.info(`[ASR Server] 进程退出,退出码: ${code}`)
this.asrServerProcess = null
})
      // Wait a moment to make sure the server has started
await new Promise((resolve) => setTimeout(resolve, 1000))
return { success: true, pid: this.asrServerProcess.pid }
} catch (error) {
log.error('启动ASR服务器失败:', error)
return { success: false, error: (error as Error).message }
}
}
  /**
   * Stop the ASR server
   * @param _event IPC event
   * @param pid Process ID
   * @returns Promise<{success: boolean, error?: string}>
   */
private async stopServer(
_event: Electron.IpcMainInvokeEvent,
pid?: number
): Promise<{ success: boolean; error?: string }> {
try {
if (!this.asrServerProcess) {
return { success: true }
}
      // Check whether the PID matches
if (pid && this.asrServerProcess.pid !== pid) {
log.warn(`请求停止的PID (${pid}) 与当前运行的ASR服务器PID (${this.asrServerProcess.pid}) 不匹配`)
}
      // Kill the process
      this.asrServerProcess.kill()
      // Wait a moment to make sure the process has exited
await new Promise((resolve) => setTimeout(resolve, 500))
this.asrServerProcess = null
return { success: true }
} catch (error) {
log.error('停止ASR服务器失败:', error)
return { success: false, error: (error as Error).message }
}
}
}
// Export a singleton instance
export const asrServerService = new ASRServerService()

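From the renderer these handlers are reached over the Asr_StartServer / Asr_StopServer channels; a hedged sketch of a caller, assuming ipcRenderer is reachable from this context (for example via a preload bridge, which this diff does not show):

// asr-control.ts — illustrative renderer-side driver for ASRServerService
import { ipcRenderer } from 'electron'
import { IpcChannel } from '@shared/IpcChannel'

async function runAsrServer(): Promise<void> {
  const started = await ipcRenderer.invoke(IpcChannel.Asr_StartServer)
  if (!started.success) throw new Error(started.error)
  // ... interact with the server on http://localhost:34515 ...
  // Pass the PID back so the service can verify it stops the right process
  await ipcRenderer.invoke(IpcChannel.Asr_StopServer, started.pid)
}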
src/main/services/FileService.ts
View File

@ -1,7 +1,12 @@
import fs from 'node:fs'
export default class FileService {
-  public static async readFile(_: Electron.IpcMainInvokeEvent, path: string) {
-    return fs.readFileSync(path, 'utf8')
+  public static async readFile(_: Electron.IpcMainInvokeEvent, path: string, encoding?: BufferEncoding) {
+    // If an encoding is specified, return a string; otherwise return binary data
+    if (encoding) {
+      return fs.readFileSync(path, encoding)
+    } else {
+      return fs.readFileSync(path)
+    }
  }
}

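The new optional encoding parameter keeps the old text behaviour while allowing binary reads. A hedged sketch of the two call shapes, invoked directly for brevity (in the app this call arrives through IPC, and the unused event argument is stubbed here):

// file-read-demo.ts — illustrative calls against the new readFile signature
import FileService from './FileService'

const evt = undefined as unknown as Electron.IpcMainInvokeEvent // stub; readFile ignores it

async function demo() {
  const text = await FileService.readFile(evt, '/tmp/notes.txt', 'utf8') // returns a string
  const bytes = await FileService.readFile(evt, '/tmp/audio.mp3') // returns a Buffer
  console.log(typeof text, (bytes as Buffer).byteLength)
}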
137
src/main/services/MsEdgeTTSService.ts Normal file
View File

@ -0,0 +1,137 @@
import fs from 'node:fs'
import path from 'node:path'
import { app } from 'electron'
import log from 'electron-log'
import { EdgeTTS } from 'node-edge-tts'
/**
 * Microsoft Edge TTS service
 * Uses Microsoft Edge's online TTS service; no API key required
 */
class MsEdgeTTSService {
private static instance: MsEdgeTTSService
private tempDir: string
private constructor() {
this.tempDir = path.join(app.getPath('temp'), 'cherry-tts')
    // Make sure the temp directory exists
if (!fs.existsSync(this.tempDir)) {
fs.mkdirSync(this.tempDir, { recursive: true })
}
}
  /**
   * Get the singleton instance
   */
public static getInstance(): MsEdgeTTSService {
if (!MsEdgeTTSService.instance) {
MsEdgeTTSService.instance = new MsEdgeTTSService()
}
return MsEdgeTTSService.instance
}
  /**
   * Get the list of available voices
   * @returns the voice list
   */
  public async getVoices(): Promise<any[]> {
    try {
      // Return a predefined list of Chinese voices
return [
{ name: 'zh-CN-XiaoxiaoNeural', locale: 'zh-CN', gender: 'Female' },
{ name: 'zh-CN-YunxiNeural', locale: 'zh-CN', gender: 'Male' },
{ name: 'zh-CN-YunyangNeural', locale: 'zh-CN', gender: 'Male' },
{ name: 'zh-CN-XiaohanNeural', locale: 'zh-CN', gender: 'Female' },
{ name: 'zh-CN-XiaomoNeural', locale: 'zh-CN', gender: 'Female' },
{ name: 'zh-CN-XiaoxuanNeural', locale: 'zh-CN', gender: 'Female' },
{ name: 'zh-CN-XiaoruiNeural', locale: 'zh-CN', gender: 'Female' },
{ name: 'zh-CN-YunfengNeural', locale: 'zh-CN', gender: 'Male' }
]
} catch (error) {
log.error('获取Microsoft Edge TTS语音列表失败:', error)
throw error
}
}
  /**
   * Synthesize speech
   * @param text the text to synthesize
   * @param voice the voice name
   * @param outputFormat the audio output format
   * @returns the path of the generated audio file
   */
public async synthesize(text: string, voice: string, outputFormat: string): Promise<string> {
try {
log.info(`Microsoft Edge TTS合成语音: 文本="${text.substring(0, 30)}...", 语音=${voice}, 格式=${outputFormat}`)
      // Validate the input parameters
if (!text || text.trim() === '') {
throw new Error('要合成的文本不能为空')
}
if (!voice || voice.trim() === '') {
throw new Error('语音名称不能为空')
}
      // Create a new EdgeTTS instance with the desired parameters
      const tts = new EdgeTTS({
        voice: voice,
        outputFormat: outputFormat,
        timeout: 30000, // 30-second timeout
        rate: '+0%', // normal speaking rate
        pitch: '+0Hz', // normal pitch
        volume: '+0%' // normal volume
})
// 生成临时文件路径
const timestamp = Date.now()
const fileExtension = outputFormat.includes('mp3') ? 'mp3' : outputFormat.split('-').pop() || 'audio'
const outputPath = path.join(this.tempDir, `tts_${timestamp}.${fileExtension}`)
log.info(`开始生成语音文件: ${outputPath}`)
// 使用ttsPromise方法生成文件
await tts.ttsPromise(text, outputPath)
// 验证生成的文件是否存在且大小大于0
if (!fs.existsSync(outputPath)) {
throw new Error(`生成的语音文件不存在: ${outputPath}`)
}
const stats = fs.statSync(outputPath)
if (stats.size === 0) {
throw new Error(`生成的语音文件大小为0: ${outputPath}`)
}
log.info(`Microsoft Edge TTS合成成功: ${outputPath}, 文件大小: ${stats.size} 字节`)
return outputPath
} catch (error: any) {
// 记录详细的错误信息
log.error(`Microsoft Edge TTS语音合成失败 (语音=${voice}):`, error)
// 尝试提供更有用的错误信息
if (error.message && typeof error.message === 'string') {
if (error.message.includes('Timed out')) {
throw new Error(`语音合成超时,请检查网络连接或尝试其他语音`)
} else if (error.message.includes('ENOTFOUND')) {
throw new Error(`无法连接到Microsoft语音服务请检查网络连接`)
} else if (error.message.includes('ECONNREFUSED')) {
throw new Error(`连接被拒绝,请检查网络设置或代理配置`)
}
}
throw error
}
}
}
// 导出单例方法
export const getVoices = async () => {
return await MsEdgeTTSService.getInstance().getVoices()
}
export const synthesize = async (text: string, voice: string, outputFormat: string) => {
return await MsEdgeTTSService.getInstance().synthesize(text, voice, outputFormat)
}
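A minimal usage sketch of these exports from another main-process module (the import path is assumed):
import { getVoices, synthesize } from './MsEdgeTTSService'

const voices = await getVoices()
console.log(voices.map((v) => v.name)) // ['zh-CN-XiaoxiaoNeural', ...]
// Returns the temp-file path; the caller is responsible for playing and cleaning it up.
const audioPath = await synthesize('你好,世界', 'zh-CN-XiaoxiaoNeural', 'audio-24khz-48kbitrate-mono-mp3')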

View File

@ -0,0 +1,50 @@
import { IpcChannel } from '@shared/IpcChannel'
import { BrowserWindow, ipcMain } from 'electron'
import * as MsTTSService from './MsTTSService'
/**
* MsTTS相关的IPC处理程序
*/
export function registerMsTTSIpcHandlers(): void {
// 获取可用的语音列表
ipcMain.handle(IpcChannel.MsTTS_GetVoices, MsTTSService.getVoices)
// 合成语音
ipcMain.handle(IpcChannel.MsTTS_Synthesize, (_, text: string, voice: string, outputFormat: string) =>
MsTTSService.synthesize(text, voice, outputFormat)
)
// 流式合成语音
ipcMain.handle(
IpcChannel.MsTTS_SynthesizeStream,
async (event, requestId: string, text: string, voice: string, outputFormat: string) => {
const window = BrowserWindow.fromWebContents(event.sender)
if (!window) return
try {
await MsTTSService.synthesizeStream(
text,
voice,
outputFormat,
(chunk: Uint8Array) => {
// 发送音频数据块
if (!window.isDestroyed()) {
window.webContents.send(IpcChannel.MsTTS_StreamData, requestId, chunk)
}
},
() => {
// 发送流结束信号
if (!window.isDestroyed()) {
window.webContents.send(IpcChannel.MsTTS_StreamEnd, requestId)
}
}
)
return { success: true }
} catch (error) {
console.error('流式TTS合成失败:', error)
return { success: false, error: error instanceof Error ? error.message : String(error) }
}
}
)
}
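On the renderer side, a consumer of these streaming channels could look like the sketch below. It assumes ipcRenderer's event API is exposed to the renderer as `window.electron.ipcRenderer` (the preload diff in this commit only shows the invoke-side `msTTS` bridge), so treat that shape as an assumption:
import { IpcChannel } from '@shared/IpcChannel'

// Assumed exposure of ipcRenderer in the preload script.
const { ipcRenderer } = window.electron
const requestId = crypto.randomUUID()
const chunks: Uint8Array[] = []

ipcRenderer.on(IpcChannel.MsTTS_StreamData, (_event, id: string, chunk: Uint8Array) => {
  if (id === requestId) chunks.push(chunk)
})
ipcRenderer.on(IpcChannel.MsTTS_StreamEnd, (_event, id: string) => {
  if (id !== requestId) return
  // All chunks received: assemble into a playable blob.
  const url = URL.createObjectURL(new Blob(chunks, { type: 'audio/mpeg' }))
  new Audio(url).play()
})
await ipcRenderer.invoke(IpcChannel.MsTTS_SynthesizeStream, requestId, '你好', 'zh-CN-XiaoxiaoNeural', 'audio-24khz-48kbitrate-mono-mp3')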

View File

@ -0,0 +1,643 @@
import fs from 'node:fs'
import path from 'node:path'
import { MsEdgeTTS, OUTPUT_FORMAT } from 'edge-tts-node' // 新版支持流式的TTS库
import { app } from 'electron'
import log from 'electron-log'
import { EdgeTTS } from 'node-edge-tts' // 旧版TTS库
// --- START OF HARDCODED VOICE LIST ---
// WARNING: This list is static and may become outdated.
// It's generally recommended to use listVoices() for the most up-to-date list.
const hardcodedVoices = [
{
Name: 'Microsoft Server Speech Text to Speech Voice (af-ZA, AdriNeural)',
ShortName: 'af-ZA-AdriNeural',
Gender: 'Female',
Locale: 'af-ZA'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (am-ET, MekdesNeural)',
ShortName: 'am-ET-MekdesNeural',
Gender: 'Female',
Locale: 'am-ET'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ar-AE, FatimaNeural)',
ShortName: 'ar-AE-FatimaNeural',
Gender: 'Female',
Locale: 'ar-AE'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ar-AE, HamdanNeural)',
ShortName: 'ar-AE-HamdanNeural',
Gender: 'Male',
Locale: 'ar-AE'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ar-BH, AliNeural)',
ShortName: 'ar-BH-AliNeural',
Gender: 'Male',
Locale: 'ar-BH'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ar-BH, LailaNeural)',
ShortName: 'ar-BH-LailaNeural',
Gender: 'Female',
Locale: 'ar-BH'
},
// ... (Many other Arabic locales/voices) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (ar-SA, ZariyahNeural)',
ShortName: 'ar-SA-ZariyahNeural',
Gender: 'Female',
Locale: 'ar-SA'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (az-AZ, BabekNeural)',
ShortName: 'az-AZ-BabekNeural',
Gender: 'Male',
Locale: 'az-AZ'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (az-AZ, BanuNeural)',
ShortName: 'az-AZ-BanuNeural',
Gender: 'Female',
Locale: 'az-AZ'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (bg-BG, BorislavNeural)',
ShortName: 'bg-BG-BorislavNeural',
Gender: 'Male',
Locale: 'bg-BG'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (bg-BG, KalinaNeural)',
ShortName: 'bg-BG-KalinaNeural',
Gender: 'Female',
Locale: 'bg-BG'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (bn-BD, NabanitaNeural)',
ShortName: 'bn-BD-NabanitaNeural',
Gender: 'Female',
Locale: 'bn-BD'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (bn-BD, PradeepNeural)',
ShortName: 'bn-BD-PradeepNeural',
Gender: 'Male',
Locale: 'bn-BD'
},
// ... (Catalan, Czech, Welsh, Danish, German, Greek, English variants) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-AU, NatashaNeural)',
ShortName: 'en-AU-NatashaNeural',
Gender: 'Female',
Locale: 'en-AU'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-AU, WilliamNeural)',
ShortName: 'en-AU-WilliamNeural',
Gender: 'Male',
Locale: 'en-AU'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-CA, ClaraNeural)',
ShortName: 'en-CA-ClaraNeural',
Gender: 'Female',
Locale: 'en-CA'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-CA, LiamNeural)',
ShortName: 'en-CA-LiamNeural',
Gender: 'Male',
Locale: 'en-CA'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-GB, LibbyNeural)',
ShortName: 'en-GB-LibbyNeural',
Gender: 'Female',
Locale: 'en-GB'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-GB, MaisieNeural)',
ShortName: 'en-GB-MaisieNeural',
Gender: 'Female',
Locale: 'en-GB'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-GB, RyanNeural)',
ShortName: 'en-GB-RyanNeural',
Gender: 'Male',
Locale: 'en-GB'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-GB, SoniaNeural)',
ShortName: 'en-GB-SoniaNeural',
Gender: 'Female',
Locale: 'en-GB'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-GB, ThomasNeural)',
ShortName: 'en-GB-ThomasNeural',
Gender: 'Male',
Locale: 'en-GB'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-HK, SamNeural)',
ShortName: 'en-HK-SamNeural',
Gender: 'Male',
Locale: 'en-HK'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-HK, YanNeural)',
ShortName: 'en-HK-YanNeural',
Gender: 'Female',
Locale: 'en-HK'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-IE, ConnorNeural)',
ShortName: 'en-IE-ConnorNeural',
Gender: 'Male',
Locale: 'en-IE'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-IE, EmilyNeural)',
ShortName: 'en-IE-EmilyNeural',
Gender: 'Female',
Locale: 'en-IE'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-IN, NeerjaNeural)',
ShortName: 'en-IN-NeerjaNeural',
Gender: 'Female',
Locale: 'en-IN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-IN, PrabhatNeural)',
ShortName: 'en-IN-PrabhatNeural',
Gender: 'Male',
Locale: 'en-IN'
},
// ... (Many more English variants: KE, NG, NZ, PH, SG, TZ, US, ZA) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)',
ShortName: 'en-US-AriaNeural',
Gender: 'Female',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, AnaNeural)',
ShortName: 'en-US-AnaNeural',
Gender: 'Female',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, ChristopherNeural)',
ShortName: 'en-US-ChristopherNeural',
Gender: 'Male',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, EricNeural)',
ShortName: 'en-US-EricNeural',
Gender: 'Male',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, GuyNeural)',
ShortName: 'en-US-GuyNeural',
Gender: 'Male',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)',
ShortName: 'en-US-JennyNeural',
Gender: 'Female',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, MichelleNeural)',
ShortName: 'en-US-MichelleNeural',
Gender: 'Female',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, RogerNeural)',
ShortName: 'en-US-RogerNeural',
Gender: 'Male',
Locale: 'en-US'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (en-US, SteffanNeural)',
ShortName: 'en-US-SteffanNeural',
Gender: 'Male',
Locale: 'en-US'
},
// ... (Spanish variants) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (es-MX, DaliaNeural)',
ShortName: 'es-MX-DaliaNeural',
Gender: 'Female',
Locale: 'es-MX'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (es-MX, JorgeNeural)',
ShortName: 'es-MX-JorgeNeural',
Gender: 'Male',
Locale: 'es-MX'
},
// ... (Estonian, Basque, Persian, Finnish, Filipino, French, Irish, Galician, Gujarati, Hebrew, Hindi, Croatian, Hungarian, Indonesian, Icelandic, Italian, Japanese) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (ja-JP, KeitaNeural)',
ShortName: 'ja-JP-KeitaNeural',
Gender: 'Male',
Locale: 'ja-JP'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ja-JP, NanamiNeural)',
ShortName: 'ja-JP-NanamiNeural',
Gender: 'Female',
Locale: 'ja-JP'
},
// ... (Javanese, Georgian, Kazakh, Khmer, Kannada, Korean) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (ko-KR, InJoonNeural)',
ShortName: 'ko-KR-InJoonNeural',
Gender: 'Male',
Locale: 'ko-KR'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (ko-KR, SunHiNeural)',
ShortName: 'ko-KR-SunHiNeural',
Gender: 'Female',
Locale: 'ko-KR'
},
// ... (Lao, Lithuanian, Latvian, Macedonian, Malayalam, Mongolian, Marathi, Malay, Maltese, Burmese, Norwegian, Dutch, Polish, Pashto, Portuguese) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (pt-BR, AntonioNeural)',
ShortName: 'pt-BR-AntonioNeural',
Gender: 'Male',
Locale: 'pt-BR'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (pt-BR, FranciscaNeural)',
ShortName: 'pt-BR-FranciscaNeural',
Gender: 'Female',
Locale: 'pt-BR'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (pt-PT, DuarteNeural)',
ShortName: 'pt-PT-DuarteNeural',
Gender: 'Male',
Locale: 'pt-PT'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (pt-PT, RaquelNeural)',
ShortName: 'pt-PT-RaquelNeural',
Gender: 'Female',
Locale: 'pt-PT'
},
// ... (Romanian, Russian, Sinhala, Slovak, Slovenian, Somali, Albanian, Serbian, Sundanese, Swedish, Swahili, Tamil, Telugu, Thai) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (th-TH, NiwatNeural)',
ShortName: 'th-TH-NiwatNeural',
Gender: 'Male',
Locale: 'th-TH'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (th-TH, PremwadeeNeural)',
ShortName: 'th-TH-PremwadeeNeural',
Gender: 'Female',
Locale: 'th-TH'
},
// ... (Turkish, Ukrainian, Urdu, Uzbek, Vietnamese) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (vi-VN, HoaiMyNeural)',
ShortName: 'vi-VN-HoaiMyNeural',
Gender: 'Female',
Locale: 'vi-VN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (vi-VN, NamMinhNeural)',
ShortName: 'vi-VN-NamMinhNeural',
Gender: 'Male',
Locale: 'vi-VN'
},
// ... (Chinese variants) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN, XiaoxiaoNeural)',
ShortName: 'zh-CN-XiaoxiaoNeural',
Gender: 'Female',
Locale: 'zh-CN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN, YunxiNeural)',
ShortName: 'zh-CN-YunxiNeural',
Gender: 'Male',
Locale: 'zh-CN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN, YunjianNeural)',
ShortName: 'zh-CN-YunjianNeural',
Gender: 'Male',
Locale: 'zh-CN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN, YunxiaNeural)',
ShortName: 'zh-CN-YunxiaNeural',
Gender: 'Male',
Locale: 'zh-CN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN, YunyangNeural)',
ShortName: 'zh-CN-YunyangNeural',
Gender: 'Male',
Locale: 'zh-CN'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN-liaoning, XiaobeiNeural)',
ShortName: 'zh-CN-liaoning-XiaobeiNeural',
Gender: 'Female',
Locale: 'zh-CN-liaoning'
},
// { Name: 'Microsoft Server Speech Text to Speech Voice (zh-CN-shaanxi, XiaoniNeural)', ShortName: 'zh-CN-shaanxi-XiaoniNeural', Gender: 'Female', Locale: 'zh-CN-shaanxi' }, // Example regional voice
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-HK, HiuGaaiNeural)',
ShortName: 'zh-HK-HiuGaaiNeural',
Gender: 'Female',
Locale: 'zh-HK'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-HK, HiuMaanNeural)',
ShortName: 'zh-HK-HiuMaanNeural',
Gender: 'Female',
Locale: 'zh-HK'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-HK, WanLungNeural)',
ShortName: 'zh-HK-WanLungNeural',
Gender: 'Male',
Locale: 'zh-HK'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-TW, HsiaoChenNeural)',
ShortName: 'zh-TW-HsiaoChenNeural',
Gender: 'Female',
Locale: 'zh-TW'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-TW, HsiaoYuNeural)',
ShortName: 'zh-TW-HsiaoYuNeural',
Gender: 'Female',
Locale: 'zh-TW'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zh-TW, YunJheNeural)',
ShortName: 'zh-TW-YunJheNeural',
Gender: 'Male',
Locale: 'zh-TW'
},
// ... (Zulu) ...
{
Name: 'Microsoft Server Speech Text to Speech Voice (zu-ZA, ThandoNeural)',
ShortName: 'zu-ZA-ThandoNeural',
Gender: 'Female',
Locale: 'zu-ZA'
},
{
Name: 'Microsoft Server Speech Text to Speech Voice (zu-ZA, ThembaNeural)',
ShortName: 'zu-ZA-ThembaNeural',
Gender: 'Male',
Locale: 'zu-ZA'
}
]
// --- END OF HARDCODED VOICE LIST ---
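// Illustrative: downstream code can narrow this list by locale, e.g.
//   const zhVoices = hardcodedVoices.filter((v) => v.Locale.startsWith('zh-'))
//   const shortNames = zhVoices.map((v) => v.ShortName)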
/**
 * Free online Microsoft TTS service
 * Uses Microsoft's free online TTS endpoints; no API key is required
 */
class MsTTSService {
private static instance: MsTTSService
private tempDir: string
private constructor() {
this.tempDir = path.join(app.getPath('temp'), 'cherry-tts')
if (!fs.existsSync(this.tempDir)) {
fs.mkdirSync(this.tempDir, { recursive: true })
}
log.info('初始化免费在线TTS服务 (使用硬编码语音列表)')
}
public static getInstance(): MsTTSService {
if (!MsTTSService.instance) {
MsTTSService.instance = new MsTTSService()
}
return MsTTSService.instance
}
/**
 * Synthesize speech as a stream
 * @param text Text to synthesize
 * @param voice Voice ShortName (e.g. 'zh-CN-XiaoxiaoNeural')
 * @param outputFormat Output format (e.g. 'audio-24khz-48kbitrate-mono-mp3')
 * @param onData Callback invoked for each audio chunk
 * @param onEnd Callback invoked when the stream ends
 */
public async synthesizeStream(
text: string,
voice: string,
outputFormat: string,
onData: (chunk: Uint8Array) => void,
onEnd: () => void
): Promise<void> {
try {
// 记录详细的请求信息
log.info(`流式微软在线TTS合成语音: 文本="${text.substring(0, 30)}...", 语音=${voice}, 格式=${outputFormat}`)
// 验证输入参数
if (!text || text.trim() === '') {
throw new Error('要合成的文本不能为空')
}
if (!voice || voice.trim() === '') {
throw new Error('语音名称不能为空')
}
// 创建一个新的MsEdgeTTS实例
const tts = new MsEdgeTTS({
enableLogger: false // 禁用内部日志
})
// 设置元数据
let msOutputFormat: OUTPUT_FORMAT
if (outputFormat.includes('mp3')) {
msOutputFormat = OUTPUT_FORMAT.AUDIO_24KHZ_48KBITRATE_MONO_MP3
} else if (outputFormat.includes('webm')) {
msOutputFormat = OUTPUT_FORMAT.WEBM_24KHZ_16BIT_MONO_OPUS
} else {
msOutputFormat = OUTPUT_FORMAT.AUDIO_24KHZ_48KBITRATE_MONO_MP3
}
await tts.setMetadata(voice, msOutputFormat)
// Create the stream and wait for it to finish. Note: throwing inside an
// event handler would never reach the caller's catch block, so errors are
// surfaced by rejecting this promise instead.
const audioStream = tts.toStream(text)
await new Promise<void>((resolve, reject) => {
  audioStream.on('data', (data: Buffer) => onData(data))
  audioStream.on('end', () => {
    log.info(`流式微软在线TTS合成成功`)
    onEnd()
    resolve()
  })
  audioStream.on('error', (error: Error) => {
    log.error(`流式微软在线TTS语音合成失败:`, error)
    reject(error)
  })
})
} catch (error: any) {
// 记录详细的错误信息
log.error(`流式微软在线TTS语音合成失败 (语音=${voice}):`, error)
throw error
}
}
/**
 * Get the list of available voices (returns the hardcoded list above)
 * @returns Array of voice descriptors
 */
public async getVoices(): Promise<any[]> {
try {
log.info(`返回硬编码的 ${hardcodedVoices.length} 个语音列表`)
// 直接返回硬编码的列表
// 注意:保持 async 是为了接口兼容性,虽然这里没有实际的异步操作
return hardcodedVoices
} catch (error) {
// 这个 try/catch 在这里意义不大了,因为返回静态数据不会出错
// 但保留结构以防未来改动
log.error('获取硬编码语音列表时出错 (理论上不应发生):', error)
return [] // 返回空列表以防万一
}
}
/**
 * Synthesize speech to a temporary file
 * @param text Text to synthesize
 * @param voice Voice ShortName (e.g. 'zh-CN-XiaoxiaoNeural')
 * @param outputFormat Output format (e.g. 'audio-24khz-48kbitrate-mono-mp3')
 * @returns Path to the generated audio file
 */
public async synthesize(text: string, voice: string, outputFormat: string): Promise<string> {
try {
// 记录详细的请求信息
log.info(`微软在线TTS合成语音: 文本="${text.substring(0, 30)}...", 语音=${voice}, 格式=${outputFormat}`)
// 验证输入参数
if (!text || text.trim() === '') {
throw new Error('要合成的文本不能为空')
}
if (!voice || voice.trim() === '') {
throw new Error('语音名称不能为空')
}
// 创建一个新的EdgeTTS实例并设置参数
// 添加超时设置默认为30秒
const tts = new EdgeTTS({
voice: voice,
outputFormat: outputFormat,
timeout: 30000, // 30秒超时
rate: '+0%', // 正常语速
pitch: '+0Hz', // 正常音调
volume: '+0%' // 正常音量
})
// 生成临时文件路径
const timestamp = Date.now()
const fileExtension = outputFormat.includes('mp3') ? 'mp3' : outputFormat.split('-').pop() || 'audio'
const outputPath = path.join(this.tempDir, `tts_${timestamp}.${fileExtension}`)
log.info(`开始生成语音文件: ${outputPath}`)
// 使用ttsPromise方法生成文件
await tts.ttsPromise(text, outputPath)
// 验证生成的文件是否存在且大小大于0
if (!fs.existsSync(outputPath)) {
throw new Error(`生成的语音文件不存在: ${outputPath}`)
}
const stats = fs.statSync(outputPath)
if (stats.size === 0) {
throw new Error(`生成的语音文件大小为0: ${outputPath}`)
}
log.info(`微软在线TTS合成成功: ${outputPath}, 文件大小: ${stats.size} 字节`)
return outputPath
} catch (error: any) {
// 记录详细的错误信息
log.error(`微软在线TTS语音合成失败 (语音=${voice}):`, error)
// 尝试提供更有用的错误信息
if (error.message && typeof error.message === 'string') {
if (error.message.includes('Timed out')) {
throw new Error(`语音合成超时,请检查网络连接或尝试其他语音`)
} else if (error.message.includes('ENOTFOUND')) {
throw new Error(`无法连接到微软语音服务,请检查网络连接`)
} else if (error.message.includes('ECONNREFUSED')) {
throw new Error(`连接被拒绝,请检查网络设置或代理配置`)
}
}
throw error
}
}
/**
 * Clean up generated TTS files in the temp directory
 */
public async cleanupTempDir(): Promise<void> {
// (Cleanup method remains the same)
try {
const files = await fs.promises.readdir(this.tempDir)
for (const file of files) {
if (file.startsWith('tts_')) {
await fs.promises.unlink(path.join(this.tempDir, file))
}
}
log.info('TTS 临时文件已清理')
} catch (error) {
log.error('清理 TTS 临时文件失败:', error)
}
}
}
// 导出单例方法 (保持不变)
export const getVoices = async () => {
return await MsTTSService.getInstance().getVoices()
}
export const synthesize = async (text: string, voice: string, outputFormat: string) => {
return await MsTTSService.getInstance().synthesize(text, voice, outputFormat)
}
export const synthesizeStream = async (
text: string,
voice: string,
outputFormat: string,
onData: (chunk: Uint8Array) => void,
onEnd: () => void
) => {
return await MsTTSService.getInstance().synthesizeStream(text, voice, outputFormat, onData, onEnd)
}
export const cleanupTtsTempFiles = async () => {
await MsTTSService.getInstance().cleanupTempDir()
}
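A sketch of driving the streaming export from another main-process module, buffering the chunks into a single Buffer (illustrative only):
const parts: Buffer[] = []
await synthesizeStream(
  '你好,世界',
  'zh-CN-XiaoxiaoNeural',
  'audio-24khz-48kbitrate-mono-mp3',
  (chunk) => parts.push(Buffer.from(chunk)),
  () => console.log(`stream finished, ${Buffer.concat(parts).length} bytes total`)
)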

View File

@ -4,7 +4,8 @@ import path from 'node:path'
import { app } from 'electron'
export function getResourcePath() {
return path.join(app.getAppPath(), 'resources')
// 在打包环境中使用process.resourcesPath否则使用app.getAppPath()/resources
return app.isPackaged ? process.resourcesPath : path.join(app.getAppPath(), 'resources')
}
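// Illustrative effect of the change above (paths are examples, not actual values):
//   packaged:     process.resourcesPath, e.g. <install dir>/resources
//   development:  app.getAppPath() + '/resources', i.e. <project root>/resources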
export function getDataPath() {

View File

@ -66,7 +66,7 @@ const api = {
binaryFile: (fileId: string) => ipcRenderer.invoke(IpcChannel.File_BinaryFile, fileId)
},
fs: {
read: (path: string) => ipcRenderer.invoke(IpcChannel.Fs_Read, path)
read: (path: string, encoding?: BufferEncoding) => ipcRenderer.invoke(IpcChannel.Fs_Read, path, encoding)
},
export: {
toWord: (markdown: string, fileName: string) => ipcRenderer.invoke(IpcChannel.Export_Word, markdown, fileName)
@ -121,6 +121,11 @@ const api = {
toggle: () => ipcRenderer.invoke(IpcChannel.MiniWindow_Toggle),
setPin: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.MiniWindow_SetPin, isPinned)
},
msTTS: {
getVoices: () => ipcRenderer.invoke(IpcChannel.MsTTS_GetVoices),
synthesize: (text: string, voice: string, outputFormat: string) =>
ipcRenderer.invoke(IpcChannel.MsTTS_Synthesize, text, voice, outputFormat)
},
aes: {
encrypt: (text: string, secretKey: string, iv: string) =>
ipcRenderer.invoke(IpcChannel.Aes_Encrypt, text, secretKey, iv),
@ -183,6 +188,7 @@ const api = {
closeSearchWindow: (uid: string) => ipcRenderer.invoke(IpcChannel.SearchWindow_Close, uid),
openUrlInSearchWindow: (uid: string, url: string) => ipcRenderer.invoke(IpcChannel.SearchWindow_OpenUrl, uid, url)
},
memory: {
loadData: () => ipcRenderer.invoke(IpcChannel.Memory_LoadData),
saveData: (data: any) => ipcRenderer.invoke(IpcChannel.Memory_SaveData, data),
@ -190,6 +196,11 @@ const api = {
loadLongTermData: () => ipcRenderer.invoke(IpcChannel.LongTermMemory_LoadData),
saveLongTermData: (data: any, forceOverwrite: boolean = false) =>
ipcRenderer.invoke(IpcChannel.LongTermMemory_SaveData, data, forceOverwrite)
},
asrServer: {
startServer: () => ipcRenderer.invoke(IpcChannel.Asr_StartServer),
stopServer: (pid: number) => ipcRenderer.invoke(IpcChannel.Asr_StopServer, pid)
}
}

View File

@ -1,42 +1,43 @@
<!doctype html>
<html lang="zh-CN">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="initial-scale=1, width=device-width" />
    <meta http-equiv="Content-Security-Policy"
      content="default-src 'self'; connect-src blob: *; script-src 'self' 'unsafe-eval' *; worker-src 'self' blob:; style-src 'self' 'unsafe-inline' *; font-src 'self' data: *; img-src 'self' data: file: * blob:; media-src blob: *; frame-src * file:" />
    <title>Cherry Studio</title>
    <style>
      html,
      body {
        margin: 0;
      }

      #spinner {
        position: fixed;
        width: 100vw;
        height: 100vh;
        flex-direction: row;
        justify-content: center;
        align-items: center;
        display: none;
      }

      #spinner img {
        width: 100px;
        border-radius: 50px;
      }
    </style>
  </head>
  <body>
    <div id="root"></div>
    <div id="spinner">
      <img src="/src/assets/images/logo.png" />
    </div>
    <script type="module" src="/src/init.ts"></script>
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>

View File

@ -0,0 +1,395 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Browser ASR (External)</title>
<style>
body {
font-family: sans-serif;
padding: 1em;
}
#status {
margin-top: 1em;
font-style: italic;
color: #555;
}
#result {
margin-top: 0.5em;
border: 1px solid #ccc;
padding: 0.5em;
min-height: 50px;
background: #f9f9f9;
}
</style>
</head>
<body>
<h1>浏览器语音识别中继页面</h1>
<p>这个页面需要在浏览器中保持打开,以便应用使用其语音识别功能。</p>
<div id="status">正在连接到服务器...</div>
<div id="result"></div>
<script>
const statusDiv = document.getElementById('status');
const resultDiv = document.getElementById('result');
const ws = new WebSocket('ws://localhost:34515'); // must match the port defined in server.js (34515)
let recognition = null;
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
function updateStatus(message) {
console.log(`[Browser Page Status] ${message}`);
statusDiv.textContent = message;
}
ws.onopen = () => {
updateStatus('已连接到服务器,等待指令...');
ws.send(JSON.stringify({ type: 'identify', role: 'browser' }));
};
ws.onmessage = (event) => {
let data;
try {
data = JSON.parse(event.data);
console.log('[Browser Page] Received command:', data);
} catch (e) {
console.error('[Browser Page] Received non-JSON message:', event.data);
return;
}
if (data.type === 'start') {
startRecognition();
} else if (data.type === 'stop') {
stopRecognition();
} else if (data.type === 'reset') {
// 强制重置语音识别
forceResetRecognition();
} else {
console.warn('[Browser Page] Received unknown command type:', data.type);
}
};
ws.onerror = (error) => {
console.error('[Browser Page] WebSocket Error:', error);
updateStatus('WebSocket 连接错误!请检查服务器是否运行。');
};
ws.onclose = () => {
console.log('[Browser Page] WebSocket Connection Closed');
updateStatus('与服务器断开连接。请刷新页面或重启服务器。');
stopRecognition();
};
function setupRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:此浏览器不支持 Web Speech API。');
return false;
}
if (recognition && recognition.recognizing) {
console.log('[Browser Page] Recognition already active.');
return true;
}
recognition = new SpeechRecognition();
recognition.lang = 'zh-CN';
recognition.continuous = true;
recognition.interimResults = true;
// 增加以下设置提高语音识别的可靠性
recognition.maxAlternatives = 3; // 返回多个可能的识别结果
// 设置较短的语音识别时间,使用户能更快地看到结果
// 注意:这个属性不是标准的,可能不是所有浏览器都支持
try {
// @ts-ignore
recognition.audioStart = 0.1; // 尝试设置较低的起始音量阈值
} catch (e) {
console.log('[Browser Page] audioStart property not supported');
}
recognition.onstart = () => {
updateStatus("🎤 正在识别...");
console.log('[Browser Page] SpeechRecognition started.');
};
recognition.onresult = (event) => {
console.log('[Browser Page] Recognition result event:', event);
let interim_transcript = '';
let final_transcript = '';
// 输出识别结果的详细信息便于调试
for (let i = event.resultIndex; i < event.results.length; ++i) {
const confidence = event.results[i][0].confidence;
console.log(`[Browser Page] Result ${i}: ${event.results[i][0].transcript} (Confidence: ${confidence.toFixed(2)})`);
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
} else {
interim_transcript += event.results[i][0].transcript;
}
}
const resultText = final_transcript || interim_transcript;
resultDiv.textContent = resultText;
// 更新状态显示
if (resultText) {
updateStatus(`🎤 正在识别... (已捕捉到语音)`);
}
if (ws.readyState === WebSocket.OPEN) {
console.log(`[Browser Page] Sending ${final_transcript ? 'final' : 'interim'} result to server:`, resultText);
ws.send(JSON.stringify({ type: 'result', data: { text: resultText, isFinal: !!final_transcript } }));
}
};
recognition.onerror = (event) => {
console.error(`[Browser Page] SpeechRecognition Error - Type: ${event.error}, Message: ${event.message}`);
// 根据错误类型提供更友好的错误提示
let errorMessage = '';
switch (event.error) {
case 'no-speech':
errorMessage = '未检测到语音,请确保麦克风工作正常并尝试说话。';
// 尝试重新启动语音识别
setTimeout(() => {
if (recognition) {
try {
recognition.start();
console.log('[Browser Page] Restarting recognition after no-speech error');
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
}
}
}, 1000);
break;
case 'audio-capture':
errorMessage = '无法捕获音频,请确保麦克风已连接并已授权。';
break;
case 'not-allowed':
errorMessage = '浏览器不允许使用麦克风,请检查权限设置。';
break;
case 'network':
errorMessage = '网络错误导致语音识别失败。';
break;
case 'aborted':
errorMessage = '语音识别被用户或系统中止。';
break;
default:
errorMessage = `识别错误: ${event.error}`;
}
updateStatus(`错误: ${errorMessage}`);
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({
type: 'error',
data: {
error: event.error,
message: errorMessage || event.message || `Recognition error: ${event.error}`
}
}));
}
};
recognition.onend = () => {
console.log('[Browser Page] SpeechRecognition ended.');
// 检查是否是由于错误或用户手动停止导致的结束
const isErrorOrStopped = statusDiv.textContent.includes('错误') || statusDiv.textContent.includes('停止');
if (!isErrorOrStopped) {
// 如果不是由于错误或手动停止,则自动重新启动语音识别
updateStatus("识别暂停,正在重新启动...");
// 保存当前的recognition对象
const currentRecognition = recognition;
// 尝试重新启动语音识别
setTimeout(() => {
try {
if (currentRecognition && currentRecognition === recognition) {
currentRecognition.start();
console.log('[Browser Page] Automatically restarting recognition');
} else {
// 如果recognition对象已经变化重新创建一个
setupRecognition();
if (recognition) {
recognition.start();
console.log('[Browser Page] Created new recognition instance and started');
}
}
} catch (e) {
console.error('[Browser Page] Failed to restart recognition:', e);
updateStatus("识别已停止。等待指令...");
}
}, 300);
} else {
updateStatus("识别已停止。等待指令...");
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'stopped' }));
}
// 只有在手动停止或错误时才重置recognition对象
recognition = null;
}
};
return true;
}
function startRecognition() {
if (!SpeechRecognition) {
updateStatus('错误:浏览器不支持 Web Speech API。');
return;
}
// 显示正在准备的状态
updateStatus('正在准备麦克风...');
if (recognition) {
console.log('[Browser Page] Recognition already exists, stopping first.');
stopRecognition();
}
if (!setupRecognition()) return;
console.log('[Browser Page] Attempting to start recognition...');
try {
// 设置更长的超时时间,确保有足够的时间获取麦克风权限
const micPermissionTimeout = setTimeout(() => {
updateStatus('获取麦克风权限超时,请刷新页面重试。');
}, 10000); // 10秒超时
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true
}
})
.then(stream => {
clearTimeout(micPermissionTimeout);
console.log('[Browser Page] Microphone access granted.');
// 检查麦克风音量级别
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
const microphone = audioContext.createMediaStreamSource(stream);
const javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
javascriptNode.onaudioprocess = function () {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += (array[i]);
}
const average = values / length;
console.log('[Browser Page] Microphone volume level:', average);
// 如果音量太低,显示提示
if (average < 5) {
updateStatus('麦克风音量很低,请说话或检查麦克风设置。');
} else {
updateStatus('🎤 正在识别...');
}
// 只检查一次就断开连接
microphone.disconnect();
analyser.disconnect();
javascriptNode.disconnect();
};
// 释放测试用的音频流
setTimeout(() => {
stream.getTracks().forEach(track => track.stop());
audioContext.close();
}, 1000);
// 启动语音识别
if (recognition) {
recognition.start();
updateStatus('🎤 正在识别...');
} else {
updateStatus('错误Recognition 实例丢失。');
console.error('[Browser Page] Recognition instance lost before start.');
}
})
.catch(err => {
clearTimeout(micPermissionTimeout);
console.error('[Browser Page] Microphone access error:', err);
let errorMsg = `无法访问麦克风 (${err.name})`;
if (err.name === 'NotAllowedError') {
errorMsg = '麦克风访问被拒绝。请在浏览器设置中允许麦克风访问权限。';
} else if (err.name === 'NotFoundError') {
errorMsg = '未找到麦克风设备。请确保麦克风已连接。';
}
updateStatus(`错误: ${errorMsg}`);
recognition = null;
});
} catch (e) {
console.error('[Browser Page] Error calling recognition.start():', e);
updateStatus(`启动识别时出错: ${e.message}`);
recognition = null;
}
}
function stopRecognition() {
if (recognition) {
console.log('[Browser Page] Stopping recognition...');
updateStatus("正在停止识别...");
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error calling recognition.stop():', e);
recognition = null;
updateStatus("停止时出错,已强制重置。");
}
} else {
console.log('[Browser Page] Recognition not active, nothing to stop.');
updateStatus("识别未运行。");
}
}
function forceResetRecognition() {
console.log('[Browser Page] Force resetting recognition...');
updateStatus("强制重置语音识别...");
// 先尝试停止当前的识别
if (recognition) {
try {
recognition.stop();
} catch (e) {
console.error('[Browser Page] Error stopping recognition during reset:', e);
}
}
// 强制设置为null丢弃所有后续结果
recognition = null;
// 通知服务器已重置
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ type: 'status', message: 'reset_complete' }));
}
updateStatus("语音识别已重置,等待新指令。");
}
</script>
</body>
</html>

View File

@ -0,0 +1,27 @@
{
"name": "cherry-asr-server",
"version": "1.0.0",
"description": "Cherry Studio ASR Server",
"main": "server.js",
"bin": "server.js",
"scripts": {
"start": "node server.js",
"build": "pkg ."
},
"pkg": {
"targets": [
"node16-win-x64"
],
"outputPath": "dist",
"assets": [
"index.html"
]
},
"dependencies": {
"express": "^4.18.2",
"ws": "^8.13.0"
},
"devDependencies": {
"pkg": "^5.8.1"
}
}

View File

@ -0,0 +1,179 @@
const http = require('http')
const WebSocket = require('ws')
const express = require('express')
const path = require('path') // Need path module
const app = express()
const port = 34515 // Define the port
// 获取index.html文件的路径
function getIndexHtmlPath() {
// 在开发环境中,直接使用相对路径
const devPath = path.join(__dirname, 'index.html')
// 在pkg打包后文件会被包含在可执行文件中
// 使用process.pkg检测是否是打包环境
if (process.pkg) {
// 在打包环境中,使用绝对路径
return path.join(path.dirname(process.execPath), 'index.html')
}
// 如果文件存在,返回开发路径
try {
if (require('fs').existsSync(devPath)) {
return devPath
}
} catch (e) {
console.error('Error checking file existence:', e)
}
// 如果都不存在,尝试使用当前目录
return path.join(process.cwd(), 'index.html')
}
// 提供网页给浏览器
app.get('/', (req, res) => {
const indexPath = getIndexHtmlPath()
console.log(`Serving index.html from: ${indexPath}`)
res.sendFile(indexPath)
})
const server = http.createServer(app)
const wss = new WebSocket.Server({ server })
let browserConnection = null
let electronConnection = null
wss.on('connection', (ws) => {
console.log('[Server] WebSocket client connected') // Add log
ws.on('message', (message) => {
let data
try {
// Ensure message is treated as string before parsing
data = JSON.parse(message.toString())
console.log('[Server] Received message:', data) // Log parsed data
} catch (e) {
console.error('[Server] Failed to parse message or message is not JSON:', message.toString(), e)
return // Ignore non-JSON messages
}
// 识别客户端类型
if (data.type === 'identify') {
if (data.role === 'browser') {
browserConnection = ws
console.log('[Server] Browser identified and connected')
// Notify Electron that the browser is ready
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent browser_ready status to Electron')
}
// Notify Electron if it's already connected
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connected' }))
}
ws.on('close', () => {
console.log('[Server] Browser disconnected')
browserConnection = null
// Notify Electron
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser disconnected' }))
}
})
ws.on('error', (error) => {
console.error('[Server] Browser WebSocket error:', error)
browserConnection = null // Assume disconnected on error
if (electronConnection) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
}
})
} else if (data.role === 'electron') {
electronConnection = ws
console.log('[Server] Electron identified and connected')
// If browser is already connected when Electron connects, notify Electron immediately
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
electronConnection.send(JSON.stringify({ type: 'status', message: 'browser_ready' }))
console.log('[Server] Sent initial browser_ready status to Electron')
}
ws.on('close', () => {
console.log('[Server] Electron disconnected')
electronConnection = null
// Maybe send stop to browser if electron disconnects?
// if (browserConnection) browserConnection.send(JSON.stringify({ type: 'stop' }));
})
ws.on('error', (error) => {
console.error('[Server] Electron WebSocket error:', error)
electronConnection = null // Assume disconnected on error
})
}
}
// Electron 控制开始/停止
else if (data.type === 'start' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying START command to browser')
browserConnection.send(JSON.stringify({ type: 'start' }))
} else {
console.log('[Server] Cannot relay START: Browser not connected')
// Optionally notify Electron back
electronConnection.send(JSON.stringify({ type: 'error', message: 'Browser not connected for ASR' }))
}
} else if (data.type === 'stop' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STOP command to browser')
browserConnection.send(JSON.stringify({ type: 'stop' }))
} else {
console.log('[Server] Cannot relay STOP: Browser not connected')
}
} else if (data.type === 'reset' && ws === electronConnection) {
if (browserConnection && browserConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying RESET command to browser')
browserConnection.send(JSON.stringify({ type: 'reset' }))
} else {
console.log('[Server] Cannot relay RESET: Browser not connected')
}
}
// 浏览器发送识别结果
else if (data.type === 'result' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
// console.log('[Server] Relaying RESULT to Electron:', data.data); // Log less frequently if needed
electronConnection.send(JSON.stringify({ type: 'result', data: data.data }))
} else {
// console.log('[Server] Cannot relay RESULT: Electron not connected');
}
}
// 浏览器发送状态更新 (例如 'stopped')
else if (data.type === 'status' && ws === browserConnection) {
if (electronConnection && electronConnection.readyState === WebSocket.OPEN) {
console.log('[Server] Relaying STATUS to Electron:', data.message) // Log status being relayed
electronConnection.send(JSON.stringify({ type: 'status', message: data.message }))
} else {
console.log('[Server] Cannot relay STATUS: Electron not connected')
}
} else {
console.log('[Server] Received unknown message type or from unknown source:', data)
}
})
ws.on('error', (error) => {
// Generic error handling for connection before identification
console.error('[Server] Initial WebSocket connection error:', error)
// Attempt to clean up based on which connection it might be (if identified)
if (ws === browserConnection) {
browserConnection = null
if (electronConnection)
electronConnection.send(JSON.stringify({ type: 'status', message: 'Browser connection error' }))
} else if (ws === electronConnection) {
electronConnection = null
}
})
})
server.listen(port, () => {
console.log(`[Server] Server running at http://localhost:${port}`)
})
// Handle server errors
server.on('error', (error) => {
console.error(`[Server] Failed to start server:`, error)
process.exit(1) // Exit if server fails to start
})
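For reference, a minimal sketch of a client speaking this relay protocol (same `ws` dependency; the flow mirrors what the Electron side is expected to do):
const WebSocket = require('ws')
const client = new WebSocket('ws://localhost:34515')
client.on('open', () => {
  client.send(JSON.stringify({ type: 'identify', role: 'electron' }))
})
client.on('message', (raw) => {
  const msg = JSON.parse(raw.toString())
  if (msg.type === 'status' && msg.message === 'browser_ready') {
    client.send(JSON.stringify({ type: 'start' })) // relay forwards this to the browser page
  } else if (msg.type === 'result') {
    console.log(msg.data.isFinal ? 'final:' : 'interim:', msg.data.text)
  }
})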

View File

@ -0,0 +1,243 @@
import { AudioOutlined, LoadingOutlined } from '@ant-design/icons'
import { useSettings } from '@renderer/hooks/useSettings'
import ASRService from '@renderer/services/ASRService'
import { Button, Tooltip } from 'antd'
import { FC, useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface Props {
onTranscribed: (text: string, isFinal?: boolean) => void
disabled?: boolean
style?: React.CSSProperties
}
const ASRButton: FC<Props> = ({ onTranscribed, disabled = false, style }) => {
const { t } = useTranslation()
const { asrEnabled } = useSettings()
const [isRecording, setIsRecording] = useState(false)
const [isProcessing, setIsProcessing] = useState(false)
const [countdown, setCountdown] = useState(0)
const [isCountingDown, setIsCountingDown] = useState(false)
const handleASR = useCallback(async () => {
if (!asrEnabled) {
window.message.error({ content: t('settings.asr.error.not_enabled'), key: 'asr-error' })
return
}
if (isRecording) {
// 停止录音并处理
setIsRecording(false)
setIsProcessing(true)
try {
// 添加事件监听器监听服务器发送的stopped消息
const originalCallback = ASRService.resultCallback
const stopCallback = (text: string) => {
// 如果是空字符串,只重置状态,不调用原始回调
if (text === '') {
setIsProcessing(false)
return
}
// 否则调用原始回调并重置状态
if (originalCallback) originalCallback(text)
setIsProcessing(false)
}
await ASRService.stopRecording(stopCallback)
} catch (error) {
console.error('ASR error:', error)
setIsProcessing(false)
}
} else {
// 开始录音
// 显示3秒倒计时同时立即开始录音
setIsCountingDown(true)
setCountdown(3)
setIsRecording(true)
// 立即发送开始信号
try {
await ASRService.startRecording(onTranscribed)
} catch (error) {
console.error('Failed to start recording:', error)
setIsRecording(false)
setIsCountingDown(false)
return
}
// 倒计时结束后只隐藏倒计时显示
setTimeout(() => {
setIsCountingDown(false)
}, 3000) // 3秒倒计时
}
}, [asrEnabled, isRecording, onTranscribed, t])
const handleCancel = useCallback(() => {
if (isCountingDown) {
// 如果在倒计时中,取消倒计时和录音
setIsCountingDown(false)
setCountdown(0)
// 同时取消录音,因为录音已经开始
ASRService.cancelRecording()
setIsRecording(false)
} else if (isRecording) {
// 如果已经在录音,取消录音
ASRService.cancelRecording()
setIsRecording(false)
}
}, [isRecording, isCountingDown])
// 倒计时效果
useEffect(() => {
if (isCountingDown && countdown > 0) {
const timer = setTimeout(() => {
setCountdown(countdown - 1)
}, 1000)
return () => clearTimeout(timer)
}
return undefined // 添加返回值以解决TS7030错误
}, [countdown, isCountingDown])
if (!asrEnabled) {
return null
}
return (
<Tooltip
title={
isRecording
? t('settings.asr.stop')
: isCountingDown
? `${t('settings.asr.preparing')} (${countdown})`
: t('settings.asr.start')
}>
<ButtonWrapper>
<StyledButton
type={isRecording || isCountingDown ? 'primary' : 'default'}
icon={isProcessing ? <LoadingOutlined /> : isCountingDown ? null : <AudioOutlined />}
onClick={handleASR}
onDoubleClick={handleCancel}
disabled={disabled || isProcessing || (isCountingDown && countdown > 0)}
style={style}
className={isCountingDown ? 'counting-down' : ''}>
{isCountingDown && <CountdownNumber>{countdown}</CountdownNumber>}
</StyledButton>
{isCountingDown && (
<CountdownIndicator>
{t('settings.asr.preparing')} ({countdown})
</CountdownIndicator>
)}
</ButtonWrapper>
</Tooltip>
)
}
const ButtonWrapper = styled.div`
position: relative;
display: inline-block;
`
const CountdownIndicator = styled.div`
position: absolute;
top: -25px;
left: 50%;
transform: translateX(-50%);
background-color: var(--color-primary);
color: white;
padding: 2px 8px;
border-radius: 10px;
font-size: 12px;
white-space: nowrap;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
animation: pulse 1s infinite;
z-index: 10;
@keyframes pulse {
0% {
opacity: 0.7;
}
50% {
opacity: 1;
}
100% {
opacity: 0.7;
}
}
&:after {
content: '';
position: absolute;
bottom: -5px;
left: 50%;
transform: translateX(-50%);
width: 0;
height: 0;
border-left: 5px solid transparent;
border-right: 5px solid transparent;
border-top: 5px solid var(--color-primary);
}
`
const CountdownNumber = styled.span`
font-size: 18px;
font-weight: bold;
animation: zoom 1s infinite;
@keyframes zoom {
0% {
transform: scale(0.8);
}
50% {
transform: scale(1.2);
}
100% {
transform: scale(0.8);
}
}
`
const StyledButton = styled(Button)`
min-width: 30px;
height: 30px;
font-size: 16px;
border-radius: 50%;
transition: all 0.3s ease;
color: var(--color-icon);
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
padding: 0;
border: none; /* 移除边框 */
&.anticon,
&.iconfont {
transition: all 0.3s ease;
color: var(--color-icon);
}
&:hover {
background-color: var(--color-background-soft);
.anticon,
.iconfont {
color: var(--color-text-1);
}
}
&.active {
background-color: var(--color-primary) !important;
.anticon,
.iconfont {
color: var(--color-white-soft);
}
&:hover {
background-color: var(--color-primary);
}
}
&.counting-down {
font-weight: bold;
background-color: var(--color-primary);
color: var(--color-white-soft);
}
`
export default ASRButton
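A usage sketch for the button; the input-state setters here (`setInputValue`, `setPreview`) are illustrative:
<ASRButton
  disabled={isSending}
  onTranscribed={(text, isFinal) => {
    // Interim results update a live preview; final results are committed to the input box.
    if (isFinal) setInputValue((prev) => prev + text)
    else setPreview(text)
  }}
/>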

View File

@ -0,0 +1,621 @@
import {
AudioMutedOutlined,
AudioOutlined,
CloseOutlined,
DownOutlined,
DragOutlined,
PauseCircleOutlined,
PlayCircleOutlined,
SettingOutlined,
SoundOutlined,
UpOutlined
} from '@ant-design/icons'
import { Button, Space, Tooltip } from 'antd'
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useDispatch } from 'react-redux'
import styled from 'styled-components'
import { VoiceCallService } from '../services/VoiceCallService'
import { setIsVoiceCallActive, setLastPlayedMessageId, setSkipNextAutoTTS } from '../store/settings'
import VoiceVisualizer from './VoiceVisualizer'
interface Props {
visible: boolean
onClose: () => void
position?: { x: number; y: number }
onPositionChange?: (position: { x: number; y: number }) => void
}
// --- 样式组件 ---
const Container = styled.div`
width: 300px;
background-color: var(--color-background);
border-radius: 8px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
overflow: hidden;
display: flex;
flex-direction: column;
transform-origin: top left;
will-change: transform;
position: fixed;
z-index: 1000;
left: 0;
top: 0;
cursor: default;
`
const Header = styled.div`
padding: 8px 12px;
background-color: var(--color-primary);
color: white;
font-weight: bold;
display: flex;
align-items: center;
cursor: move;
user-select: none;
position: relative;
&::before {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
height: 4px;
background-color: rgba(255, 255, 255, 0.2);
}
&:hover::before {
background-color: rgba(255, 255, 255, 0.4);
}
.drag-icon {
margin-right: 8px; // DragOutlined 的样式
}
.settings-button {
margin-left: auto; // 推到最右边
color: white; // 设置按钮颜色
}
`
const CloseButton = styled.div`
margin-left: 8px; // 与设置按钮保持间距
cursor: pointer;
`
const Content = styled.div`
display: flex;
flex-direction: column;
gap: 10px;
padding: 12px;
`
const VisualizerContainer = styled.div`
display: flex;
justify-content: space-between;
height: 60px;
`
const TranscriptContainer = styled.div`
flex: 1;
min-height: 60px;
max-height: 100px;
overflow-y: auto;
border: 1px solid var(--color-border);
border-radius: 8px;
padding: 8px;
background-color: var(--color-background-2);
`
const TranscriptText = styled.div`
margin-bottom: 8px;
`
const UserLabel = styled.span`
font-weight: bold;
color: var(--color-primary);
`
const ControlsContainer = styled.div`
display: flex;
justify-content: center;
padding: 8px 0;
`
const RecordButton = styled(Button)`
min-width: 120px;
`
// 设置面板的样式
const SettingsPanel = styled.div`
margin-bottom: 10px;
padding: 10px;
border: 1px solid var(--color-border);
border-radius: 8px;
`
const SettingsTitle = styled.div`
margin-bottom: 8px;
`
const ShortcutKeyButton = styled(Button)`
min-width: 120px;
`
const SettingsTip = styled.div`
margin-top: 8px;
font-size: 12px;
color: var(--color-text-secondary);
`
// --- 样式组件结束 ---
const DraggableVoiceCallWindow: React.FC<Props> = ({
visible,
onClose,
position = { x: 20, y: 20 },
onPositionChange
}) => {
const { t } = useTranslation()
const dispatch = useDispatch()
const [isDragging, setIsDragging] = useState(false)
const [currentPosition, setCurrentPosition] = useState(position)
const dragStartRef = useRef<{ startX: number; startY: number; initialX: number; initialY: number } | null>(null)
const containerRef = useRef<HTMLDivElement>(null)
// --- 语音通话状态 ---
const [transcript, setTranscript] = useState('')
const [isListening, setIsListening] = useState(false)
const [isSpeaking, setIsSpeaking] = useState(false)
const [isRecording, setIsRecording] = useState(false)
const [isProcessing, setIsProcessing] = useState(false)
const [isPaused, setIsPaused] = useState(false)
const [isMuted, setIsMuted] = useState(false)
// --- 语音通话状态结束 ---
// --- 快捷键相关状态 ---
const [shortcutKey, setShortcutKey] = useState('Space')
const [isShortcutPressed, setIsShortcutPressed] = useState(false)
const [isSettingsVisible, setIsSettingsVisible] = useState(false)
const [tempShortcutKey, setTempShortcutKey] = useState(shortcutKey)
const [isRecordingShortcut, setIsRecordingShortcut] = useState(false)
const [isCollapsed, setIsCollapsed] = useState(false)
// --- 快捷键相关状态结束 ---
const isInitializedRef = useRef(false)
// --- 拖拽逻辑 ---
const handleDragStart = useCallback(
(e: React.MouseEvent) => {
if ((e.target as HTMLElement).closest('button, input, a')) {
return
}
e.preventDefault()
setIsDragging(true)
dragStartRef.current = {
startX: e.clientX,
startY: e.clientY,
initialX: currentPosition.x,
initialY: currentPosition.y
}
},
[currentPosition]
)
const handleDrag = useCallback(
(e: MouseEvent) => {
if (!isDragging || !dragStartRef.current) return
e.preventDefault()
const deltaX = e.clientX - dragStartRef.current.startX
const deltaY = e.clientY - dragStartRef.current.startY
let newX = dragStartRef.current.initialX + deltaX
let newY = dragStartRef.current.initialY + deltaY
const windowWidth = window.innerWidth
const windowHeight = window.innerHeight
const containerWidth = containerRef.current?.offsetWidth || 300
const containerHeight = containerRef.current?.offsetHeight || 300
newX = Math.max(0, Math.min(newX, windowWidth - containerWidth))
newY = Math.max(0, Math.min(newY, windowHeight - containerHeight))
const newPosition = { x: newX, y: newY }
setCurrentPosition(newPosition)
onPositionChange?.(newPosition)
},
[isDragging, onPositionChange]
)
const handleDragEnd = useCallback(
(e: MouseEvent) => {
if (isDragging) {
e.preventDefault()
setIsDragging(false)
dragStartRef.current = null
}
},
[isDragging] // 移除了 currentPosition 依赖,因为它只在 handleDragStart 中读取一次
)
const throttle = useMemo(() => {
let lastCall = 0
const delay = 16 // ~60fps
return (func: (e: MouseEvent) => void) => {
return (e: MouseEvent) => {
const now = new Date().getTime()
if (now - lastCall < delay) {
return
}
lastCall = now
func(e)
}
}
}, [])
const throttledHandleDrag = useMemo(() => throttle(handleDrag), [handleDrag, throttle])
useEffect(() => {
if (isDragging) {
document.addEventListener('mousemove', throttledHandleDrag)
document.addEventListener('mouseup', handleDragEnd)
document.body.style.cursor = 'move'
} else {
document.removeEventListener('mousemove', throttledHandleDrag)
document.removeEventListener('mouseup', handleDragEnd)
document.body.style.cursor = 'default'
}
return () => {
document.removeEventListener('mousemove', throttledHandleDrag)
document.removeEventListener('mouseup', handleDragEnd)
document.body.style.cursor = 'default'
}
}, [isDragging, throttledHandleDrag, handleDragEnd])
// --- 拖拽逻辑结束 ---
// --- 状态和副作用管理 ---
useEffect(() => {
const handleTTSStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
setIsSpeaking(isPlaying)
}
const startVoiceCall = async () => {
try {
window.message.loading({ content: t('voice_call.initializing'), key: 'voice-call-init' })
try {
await VoiceCallService.initialize()
} catch (initError) {
console.warn('语音识别服务初始化警告:', initError)
}
await VoiceCallService.startCall({
onTranscript: setTranscript,
onResponse: () => {
/* 响应在聊天界面处理 */
},
onListeningStateChange: setIsListening,
onSpeakingStateChange: setIsSpeaking
})
window.message.success({ content: t('voice_call.ready'), key: 'voice-call-init' })
isInitializedRef.current = true
} catch (error) {
console.error('Voice call error:', error)
window.message.error({ content: t('voice_call.error'), key: 'voice-call-init' })
onClose()
}
}
if (visible) {
dispatch(setIsVoiceCallActive(true))
dispatch(setLastPlayedMessageId(null))
dispatch(setSkipNextAutoTTS(true))
if (!isInitializedRef.current) {
startVoiceCall()
}
window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
} else if (!visible && isInitializedRef.current) {
dispatch(setIsVoiceCallActive(false))
dispatch(setSkipNextAutoTTS(false))
VoiceCallService.endCall()
setTranscript('')
setIsListening(false)
setIsSpeaking(false)
setIsRecording(false)
setIsProcessing(false)
setIsPaused(false)
setIsMuted(false)
isInitializedRef.current = false
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
return () => {
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
}, [visible, dispatch, t, onClose])
// --- 状态和副作用管理结束 ---
// --- 语音通话控制函数 ---
const toggleMute = useCallback(() => {
const newMuteState = !isMuted
setIsMuted(newMuteState)
VoiceCallService.setMuted(newMuteState)
}, [isMuted]) // 添加依赖
const togglePause = useCallback(() => {
const newPauseState = !isPaused
setIsPaused(newPauseState)
VoiceCallService.setPaused(newPauseState)
}, [isPaused]) // 添加依赖
// !! 将这些函数定义移到 handleKeyDown/handleKeyUp 之前 !!
const handleRecordStart = useCallback(
async (e: React.MouseEvent | React.TouchEvent | KeyboardEvent) => {
e.preventDefault()
if (isProcessing || isPaused) return
setTranscript('')
VoiceCallService.stopTTS()
setIsSpeaking(false)
setIsRecording(true)
setIsProcessing(true)
try {
await VoiceCallService.startRecording()
setIsProcessing(false)
} catch (error) {
window.message.error({ content: '启动语音识别失败,请确保语音识别服务已启动', key: 'voice-call-error' })
setIsRecording(false)
setIsProcessing(false)
}
},
[isProcessing, isPaused]
)
const handleRecordEnd = useCallback(
async (e: React.MouseEvent | React.TouchEvent | KeyboardEvent) => {
e.preventDefault()
if (!isRecording) return
setIsRecording(false)
setIsProcessing(true)
VoiceCallService.stopTTS()
setIsSpeaking(false)
try {
const success = await VoiceCallService.stopRecordingAndSendToChat()
if (success) {
window.message.success({ content: '语音识别已完成,正在发送消息...', key: 'voice-call-send' })
} else {
window.message.error({ content: '发送语音识别结果失败', key: 'voice-call-error' })
}
} catch (error) {
window.message.error({ content: '停止录音出错', key: 'voice-call-error' })
} finally {
setTimeout(() => setIsProcessing(false), 500)
}
},
[isRecording]
)
const handleRecordCancel = useCallback(
async (e: React.MouseEvent | React.TouchEvent | KeyboardEvent) => {
e.preventDefault()
if (isRecording) {
setIsRecording(false)
setIsProcessing(true)
VoiceCallService.stopTTS()
setIsSpeaking(false)
try {
await VoiceCallService.cancelRecording()
setTranscript('')
} catch (error) {
console.error('取消录音出错:', error)
} finally {
setTimeout(() => setIsProcessing(false), 500)
}
}
},
[isRecording]
)
// --- 语音通话控制函数结束 ---
// --- 快捷键相关函数 ---
const getKeyDisplayName = (keyCode: string) => {
const keyMap: Record<string, string> = {
Space: '空格键',
Enter: '回车键',
ShiftLeft: '左Shift键',
ShiftRight: '右Shift键',
ControlLeft: '左Ctrl键',
ControlRight: '右Ctrl键',
AltLeft: '左Alt键',
AltRight: '右Alt键'
}
return keyMap[keyCode] || keyCode
}
const handleShortcutKeyChange = useCallback(
(e: KeyboardEvent) => {
e.preventDefault()
if (isRecordingShortcut) {
setTempShortcutKey(e.code)
setIsRecordingShortcut(false)
}
},
[isRecordingShortcut]
)
const saveShortcutKey = useCallback(() => {
setShortcutKey(tempShortcutKey)
localStorage.setItem('voiceCallShortcutKey', tempShortcutKey)
setIsSettingsVisible(false)
}, [tempShortcutKey])
// 现在可以安全地使用 handleRecordStart/End
const handleKeyDown = useCallback(
(e: KeyboardEvent) => {
if (isRecordingShortcut) {
handleShortcutKeyChange(e)
return
}
if (e.code === shortcutKey && !isProcessing && !isPaused && visible && !isShortcutPressed) {
e.preventDefault()
setIsShortcutPressed(true)
const mockEvent = new MouseEvent('mousedown') as unknown as React.MouseEvent // 类型断言
handleRecordStart(mockEvent) // 现在 handleRecordStart 已经定义
}
},
[
shortcutKey,
isProcessing,
isPaused,
visible,
isShortcutPressed,
handleRecordStart, // 依赖项
isRecordingShortcut,
handleShortcutKeyChange
]
)
const handleKeyUp = useCallback(
(e: KeyboardEvent) => {
if (e.code === shortcutKey && isShortcutPressed && visible) {
e.preventDefault()
setIsShortcutPressed(false)
const mockEvent = new MouseEvent('mouseup') as unknown as React.MouseEvent // 类型断言
handleRecordEnd(mockEvent) // 现在 handleRecordEnd 已经定义
}
},
[shortcutKey, isShortcutPressed, visible, handleRecordEnd]
) // 依赖项
useEffect(() => {
const savedShortcut = localStorage.getItem('voiceCallShortcutKey')
if (savedShortcut) {
setShortcutKey(savedShortcut)
setTempShortcutKey(savedShortcut)
}
}, [])
useEffect(() => {
if (visible) {
window.addEventListener('keydown', handleKeyDown)
window.addEventListener('keyup', handleKeyUp)
}
return () => {
window.removeEventListener('keydown', handleKeyDown)
window.removeEventListener('keyup', handleKeyUp)
}
}, [visible, handleKeyDown, handleKeyUp])
// --- 快捷键相关函数结束 ---
// 如果不可见,直接返回 null
if (!visible) return null
// --- JSX 渲染 ---
return (
<Container
ref={containerRef}
style={{
transform: `translate(${currentPosition.x}px, ${currentPosition.y}px)` // 使用 transform 定位
}}>
{/* 将 onMouseDown 移到 Header 上 */}
<Header onMouseDown={handleDragStart}>
<DragOutlined className="drag-icon" /> {/* 应用样式类 */}
{t('voice_call.title')}
<Button
type="text"
icon={isCollapsed ? <DownOutlined /> : <UpOutlined />}
onClick={() => setIsCollapsed(!isCollapsed)}
className="settings-button"
/>
<Button
type="text"
icon={<SettingOutlined />}
onClick={() => setIsSettingsVisible(!isSettingsVisible)}
className="settings-button" // 应用样式类
/>
<CloseButton onClick={onClose}>
<CloseOutlined />
</CloseButton>
</Header>
<Content>
{!isCollapsed && (
<>
{isSettingsVisible && (
<SettingsPanel>
{' '}
{/* 使用 styled-component */}
<SettingsTitle>{t('voice_call.shortcut_key_setting')}</SettingsTitle> {/* 使用 styled-component */}
<Space>
<ShortcutKeyButton onClick={() => setIsRecordingShortcut(true)}>
{' '}
{/* uses styled-component */}
{isRecordingShortcut ? t('voice_call.press_any_key') : getKeyDisplayName(tempShortcutKey)}
</ShortcutKeyButton>
<Button type="primary" onClick={saveShortcutKey}>
{t('voice_call.save')}
</Button>
<Button onClick={() => setIsSettingsVisible(false)}>{t('voice_call.cancel')}</Button>
</Space>
<SettingsTip>
{' '}
{/* uses styled-component */}
{t('voice_call.shortcut_key_tip')}
</SettingsTip>
</SettingsPanel>
)}
<VisualizerContainer>
<VoiceVisualizer isActive={isListening || isRecording} type="input" />
<VoiceVisualizer isActive={isSpeaking} type="output" />
</VisualizerContainer>
<TranscriptContainer>
{transcript && (
<TranscriptText>
<UserLabel>{t('voice_call.you')}:</UserLabel> {transcript}
</TranscriptText>
)}
{/* The AI response could also be rendered here */}
</TranscriptContainer>
</>
)}
<ControlsContainer>
<Space>
<Button
type="text"
icon={isMuted ? <AudioMutedOutlined /> : <AudioOutlined />}
onClick={toggleMute}
size="large"
title={isMuted ? t('voice_call.unmute') : t('voice_call.mute')}
/>
<Button
type="text"
icon={isPaused ? <PlayCircleOutlined /> : <PauseCircleOutlined />}
onClick={togglePause}
size="large"
title={isPaused ? t('voice_call.resume') : t('voice_call.pause')}
/>
<Tooltip title={`${t('voice_call.press_to_talk')} (${getKeyDisplayName(shortcutKey)})`}>
<RecordButton
type={isRecording ? 'primary' : 'default'}
icon={<SoundOutlined />}
onMouseDown={handleRecordStart}
onMouseUp={handleRecordEnd}
onMouseLeave={handleRecordCancel}
onTouchStart={handleRecordStart}
onTouchEnd={handleRecordEnd}
onTouchCancel={handleRecordCancel}
size="large"
disabled={isProcessing || isPaused}>
{isRecording ? t('voice_call.release_to_send') : t('voice_call.press_to_talk')}
</RecordButton>
</Tooltip>
</Space>
</ControlsContainer>
</Content>
</Container>
)
}
export default DraggableVoiceCallWindow

View File

@ -0,0 +1,142 @@
import { SoundOutlined } from '@ant-design/icons'
import TTSService from '@renderer/services/TTSService'
import { Message } from '@renderer/types'
import { Tooltip } from 'antd'
import { useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface TTSButtonProps {
message: Message
className?: string
}
interface SegmentedPlaybackState {
isSegmentedPlayback: boolean
segments: {
text: string
isLoaded: boolean
isLoading: boolean
}[]
currentSegmentIndex: number
isPlaying: boolean
}
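// TTSButton stays in sync with the global playback state via window-level CustomEvents
// ('tts-state-change', 'tts-segmented-playback-update'), which TTSService is expected to
// dispatch, so multiple buttons reflect the single shared playback state.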
const TTSButton: React.FC<TTSButtonProps> = ({ message, className }) => {
const { t } = useTranslation()
const [isSpeaking, setIsSpeaking] = useState(false)
// Segmented playback state (only the setter is currently used)
const [, setSegmentedPlaybackState] = useState<SegmentedPlaybackState>({
isSegmentedPlayback: false,
segments: [],
currentSegmentIndex: 0,
isPlaying: false
})
// Subscribe to TTS state change events
useEffect(() => {
const handleTTSStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
console.log('TTS button detected a TTS state change:', isPlaying)
setIsSpeaking(isPlaying)
}
// Register the listener
window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
// Clean up the listener on unmount
return () => {
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
}, [])
// Watch for segmented playback state changes
useEffect(() => {
const handleSegmentedPlaybackUpdate = (event: CustomEvent) => {
console.log('Segmented playback state update detected:', event.detail)
setSegmentedPlaybackState(event.detail)
}
// Register the listener
window.addEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
// Clean up the listener on unmount
return () => {
window.removeEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
}
}, [])
// Check the TTS state on mount
useEffect(() => {
// Check whether playback is currently in progress
const isCurrentlyPlaying = TTSService.isCurrentlyPlaying()
if (isCurrentlyPlaying !== isSpeaking) {
setIsSpeaking(isCurrentlyPlaying)
}
}, [isSpeaking])
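// (Because isSpeaking is a dependency, this effect re-runs on every local state
// change, re-syncing local state with the service's actual playback state.)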
const handleTTS = useCallback(async () => {
if (isSpeaking) {
TTSService.stop()
return // no need to set state manually; the event listener handles it
}
try {
console.log('TTS button clicked, starting message playback')
await TTSService.speakFromMessage(message)
// No manual state update needed; the event listener handles it
} catch (error) {
console.error('TTS error:', error)
// Only on error does the state need a manual reset
setIsSpeaking(false)
}
}, [isSpeaking, message])
// Segmented playback button handler - currently unused, kept for future extension
/* const handleSegmentedTTS = useCallback(async () => {
try {
console.log('Segmented TTS button clicked, starting segmented playback')
// Use the modified speakFromMessage, passing segmented=true
await TTSService.speakFromMessage(message, true)
} catch (error) {
console.error('Segmented TTS error:', error)
}
}, [message]) */
return (
<Tooltip title={isSpeaking ? t('chat.tts.stop') : t('chat.tts.play')}>
<TTSActionButton className={className} onClick={handleTTS}>
<SoundOutlined style={{ color: isSpeaking ? 'var(--color-primary)' : 'var(--color-icon)' }} />
</TTSActionButton>
</Tooltip>
)
}
const TTSActionButton = styled.div`
cursor: pointer;
border-radius: 8px;
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
width: 30px;
height: 30px;
transition: all 0.2s ease;
&:hover {
background-color: var(--color-background-mute);
.anticon {
color: var(--color-text-1);
}
}
.anticon,
.iconfont {
cursor: pointer;
font-size: 14px;
color: var(--color-icon);
}
&:hover {
color: var(--color-text-1);
}
`
export default TTSButton

View File

@ -0,0 +1,96 @@
import { TextSegmenter } from '@renderer/services/tts/TextSegmenter'
import TTSService from '@renderer/services/TTSService'
import React, { useEffect, useState } from 'react'
import styled from 'styled-components'
interface TTSHighlightedTextProps {
text: string
}
interface SegmentedPlaybackState {
isSegmentedPlayback: boolean
segments: {
text: string
isLoaded: boolean
isLoading: boolean
}[]
currentSegmentIndex: number
isPlaying: boolean
}
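// The text is re-segmented with the same TextSegmenter the TTS pipeline uses, so the
// indices passed to TTSService.playFromSegment(index) are assumed to line up with the
// segments the service actually plays.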
const TTSHighlightedText: React.FC<TTSHighlightedTextProps> = ({ text }) => {
const [segments, setSegments] = useState<string[]>([])
const [currentSegmentIndex, setCurrentSegmentIndex] = useState<number>(-1)
// Playback state variable, used to track whether audio is currently playing
const [, setIsPlaying] = useState<boolean>(false)
// Split the text into sentence segments on initialization
useEffect(() => {
const textSegments = TextSegmenter.splitIntoSentences(text)
setSegments(textSegments)
}, [text])
// Watch for segmented playback state changes
useEffect(() => {
const handleSegmentedPlaybackUpdate = (event: CustomEvent) => {
const data = event.detail as SegmentedPlaybackState
if (data.isSegmentedPlayback) {
setCurrentSegmentIndex(data.currentSegmentIndex)
setIsPlaying(data.isPlaying)
} else {
setCurrentSegmentIndex(-1)
setIsPlaying(false)
}
}
// Register the listener
window.addEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
// Clean up the listener on unmount
return () => {
window.removeEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
}
}, [])
// Handle a click on a segment
const handleSegmentClick = (index: number) => {
TTSService.playFromSegment(index)
}
if (segments.length === 0) {
return <div>{text}</div>
}
return (
<TextContainer>
{segments.map((segment, index) => (
<TextSegment
key={index}
className={index === currentSegmentIndex ? 'active' : ''}
onClick={() => handleSegmentClick(index)}>
{segment}
</TextSegment>
))}
</TextContainer>
)
}
const TextContainer = styled.div`
display: inline;
`
const TextSegment = styled.span`
cursor: pointer;
transition: background-color 0.2s ease;
&:hover {
background-color: rgba(0, 0, 0, 0.05);
}
&.active {
background-color: var(--color-primary-bg);
border-radius: 2px;
}
`
export default TTSHighlightedText

View File

@ -0,0 +1,267 @@
import { RootState } from '@renderer/store'
import React, { useEffect, useState } from 'react'
import { useSelector } from 'react-redux'
import styled from 'styled-components'
interface TTSProgressBarProps {
messageId: string
}
interface TTSProgressState {
isPlaying: boolean
progress: number // 0-100
currentTime: number
duration: number
}
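// Progress events are expected to carry the id of the message being played, letting each
// message bubble render its own bar and ignore events meant for other messages.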
const TTSProgressBar: React.FC<TTSProgressBarProps> = ({ messageId }) => {
// Read the setting that controls whether the TTS progress bar is shown
const showTTSProgressBar = useSelector((state: RootState) => state.settings.showTTSProgressBar)
const [progressState, setProgressState] = useState<TTSProgressState>({
isPlaying: false,
progress: 0,
currentTime: 0,
duration: 0
})
// Drag state
const [isDragging, setIsDragging] = useState(false)
// Listen for TTS progress update events
useEffect(() => {
const handleProgressUpdate = (event: CustomEvent) => {
const { messageId: playingMessageId, isPlaying, progress, currentTime, duration } = event.detail
// Progress logging is fully disabled here to avoid flooding the console.
// (It previously logged only on play/stop transitions or every 10% of progress,
// and never while the progress bar was being dragged.)
// Only update progress when this message is the one being played.
// playingMessageId is checked for existence and non-emptiness so that the
// welcome message in voice call mode does not show a progress bar.
if (playingMessageId && playingMessageId === messageId) {
// A reset signal (duration === 0) forces the non-playing state
if (duration === 0 && currentTime === 0 && progress === 0) {
setProgressState({
isPlaying: false,
progress: 0,
currentTime: 0,
duration: 0
})
} else {
setProgressState({ isPlaying, progress, currentTime, duration })
}
} else if (progressState.isPlaying) {
// If another message is playing while our state still says playing, reset it
setProgressState({
isPlaying: false,
progress: 0,
currentTime: 0,
duration: 0
})
}
}
// Listen for TTS state change events
const handleStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
// Reset the progress bar state when playback stops
if (!isPlaying && progressState.isPlaying) {
// console.log('TTS stop event received, resetting progress bar')
setProgressState({
isPlaying: false,
progress: 0,
currentTime: 0,
duration: 0
})
}
}
// Register listeners
window.addEventListener('tts-progress-update', handleProgressUpdate as EventListener)
window.addEventListener('tts-state-change', handleStateChange as EventListener)
// Clean up listeners on unmount
return () => {
window.removeEventListener('tts-progress-update', handleProgressUpdate as EventListener)
window.removeEventListener('tts-state-change', handleStateChange as EventListener)
}
}, [messageId, progressState.isPlaying, isDragging])
// Render nothing when not playing or when the progress bar is disabled
if (!progressState.isPlaying || !showTTSProgressBar) {
return null
}
// Handle clicks on the progress track
const handleTrackClick = (e: React.MouseEvent<HTMLDivElement>) => {
if (!progressState.isPlaying) return
// Ignore the synthetic click fired at the end of a drag
if (e.type === 'click' && e.detail === 0) return
const trackRect = e.currentTarget.getBoundingClientRect()
const clickPosition = e.clientX - trackRect.left
const trackWidth = trackRect.width
const seekPercentage = (clickPosition / trackWidth) * 100
const seekTime = (seekPercentage / 100) * progressState.duration
// console.log(`Track clicked: ${seekPercentage.toFixed(2)}%, time: ${seekTime.toFixed(2)}s`)
// Delegate the seek to the TTS service
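// (A dynamic import is used below, presumably to avoid a circular module
// dependency between this component and TTSService.)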
import('@renderer/services/TTSService').then(({ default: TTSService }) => {
TTSService.seek(seekTime)
})
}
// Handle dragging the progress handle
const handleDrag = (e: React.MouseEvent<HTMLDivElement>) => {
if (!progressState.isPlaying) return
e.preventDefault()
e.stopPropagation() // stop the event from bubbling
// Mark dragging as active
setIsDragging(true)
const trackRect = e.currentTarget.getBoundingClientRect()
const trackWidth = trackRect.width
const handleMouseMove = (moveEvent: MouseEvent) => {
if (!isDragging) return
moveEvent.preventDefault()
const dragPosition = Math.max(0, Math.min(moveEvent.clientX - trackRect.left, trackWidth))
const seekPercentage = (dragPosition / trackWidth) * 100
const seekTime = (seekPercentage / 100) * progressState.duration
// Update local state so the UI reflects the drag position in real time
setProgressState((prev) => ({
...prev,
progress: seekPercentage,
currentTime: seekTime
}))
}
const handleMouseUp = (upEvent: MouseEvent) => {
if (!isDragging) return
// Mark dragging as finished
setIsDragging(false)
document.removeEventListener('mousemove', handleMouseMove)
document.removeEventListener('mouseup', handleMouseUp)
const dragPosition = Math.max(0, Math.min(upEvent.clientX - trackRect.left, trackWidth))
const seekPercentage = (dragPosition / trackWidth) * 100
const seekTime = (seekPercentage / 100) * progressState.duration
// console.log(`Drag ended: ${seekPercentage.toFixed(2)}%, time: ${seekTime.toFixed(2)}s`)
// Delegate the seek to the TTS service
import('@renderer/services/TTSService').then(({ default: TTSService }) => {
TTSService.seek(seekTime)
})
}
document.addEventListener('mousemove', handleMouseMove)
document.addEventListener('mouseup', handleMouseUp)
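// Listeners are attached to the document (not the track) so the drag keeps working
// when the pointer leaves the track; handleMouseUp removes both listeners.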
}
return (
<ProgressBarContainer>
<ProgressBarTrack onClick={handleTrackClick} onMouseDown={handleDrag}>
<ProgressBarFill style={{ width: `${progressState.progress}%` }} />
<ProgressBarHandle style={{ left: `${progressState.progress}%` }} />
</ProgressBarTrack>
<ProgressText>
{formatTime(progressState.currentTime)} / {formatTime(progressState.duration)}
</ProgressText>
</ProgressBarContainer>
)
}
// Format seconds as mm:ss
const formatTime = (seconds: number): string => {
const mins = Math.floor(seconds / 60)
const secs = Math.floor(seconds % 60)
return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`
}
const ProgressBarContainer = styled.div`
margin-top: 8px;
margin-bottom: 8px;
width: 100%;
display: flex;
flex-direction: column;
align-items: center;
`
const ProgressBarTrack = styled.div`
width: 100%;
height: 8px;
background-color: var(--color-background-mute);
border-radius: 4px;
overflow: visible;
position: relative;
cursor: pointer;
`
const ProgressBarFill = styled.div`
height: 100%;
background-color: var(--color-primary);
border-radius: 4px;
transition: width 0.1s linear;
pointer-events: none;
`
const ProgressBarHandle = styled.div`
position: absolute;
top: 50%;
transform: translate(-50%, -50%);
width: 12px;
height: 12px;
background-color: var(--color-primary);
border-radius: 50%;
cursor: pointer;
box-shadow: 0 0 4px rgba(0, 0, 0, 0.2);
z-index: 1;
opacity: 0;
transition:
opacity 0.2s ease,
transform 0.2s ease;
pointer-events: none;
${ProgressBarTrack}:hover & {
opacity: 1;
}
`
const ProgressText = styled.div`
margin-top: 4px;
font-size: 12px;
color: var(--color-text-2);
`
export default TTSProgressBar

View File

@ -0,0 +1,76 @@
import { Spin } from 'antd'
import React from 'react'
import styled from 'styled-components'
interface TTSSegmentedTextProps {
segments: {
text: string
isLoaded: boolean
isLoading: boolean
}[]
currentSegmentIndex: number
isPlaying: boolean
onSegmentClick: (index: number) => void
}
const TTSSegmentedText: React.FC<TTSSegmentedTextProps> = ({
segments,
currentSegmentIndex,
// isPlaying, // unused parameter
onSegmentClick
}) => {
if (!segments || segments.length === 0) {
return null
}
return (
<SegmentedTextContainer>
{segments.map((segment, index) => (
<Segment
key={index}
className={`${index === currentSegmentIndex ? 'active' : ''}`}
onClick={() => onSegmentClick(index)}>
<SegmentText>{segment.text}</SegmentText>
{segment.isLoading && <Spin size="small" className="segment-loading" />}
</Segment>
))}
</SegmentedTextContainer>
)
}
const SegmentedTextContainer = styled.div`
margin: 10px 0;
padding: 10px;
border: 1px solid var(--color-border);
border-radius: 4px;
max-height: 300px;
overflow-y: auto;
`
const Segment = styled.div`
padding: 5px;
margin: 2px 0;
cursor: pointer;
border-radius: 4px;
display: flex;
align-items: center;
&:hover {
background-color: var(--color-background-soft);
}
&.active {
background-color: var(--color-primary-bg);
border-left: 3px solid var(--color-primary);
}
.segment-loading {
margin-left: 5px;
}
`
const SegmentText = styled.span`
flex: 1;
`
export default TTSSegmentedText

View File

@ -0,0 +1,62 @@
import { LoadingOutlined, PhoneOutlined } from '@ant-design/icons'
import { Button, Tooltip } from 'antd'
import React, { useState } from 'react'
import { useTranslation } from 'react-i18next'
import { VoiceCallService } from '../services/VoiceCallService'
import DraggableVoiceCallWindow from './DraggableVoiceCallWindow'
interface Props {
disabled?: boolean
style?: React.CSSProperties
}
const VoiceCallButton: React.FC<Props> = ({ disabled = false, style }) => {
const { t } = useTranslation()
const [isWindowVisible, setIsWindowVisible] = useState(false)
const [isLoading, setIsLoading] = useState(false)
const [windowPosition, setWindowPosition] = useState({ x: 20, y: 20 })
const handleClick = async () => {
if (disabled || isLoading) return
setIsLoading(true)
try {
// Initialize the voice services
await VoiceCallService.initialize()
// Make the window visible first; state updates are handled inside DraggableVoiceCallWindow
setIsWindowVisible(true)
// Note: dispatch is intentionally not called here; DraggableVoiceCallWindow handles it
} catch (error) {
console.error('Failed to initialize voice call:', error)
window.message.error(t('voice_call.initialization_failed'))
} finally {
setIsLoading(false)
}
}
return (
<>
<Tooltip title={t('voice_call.start')}>
<Button
type="text"
icon={isLoading ? <LoadingOutlined /> : <PhoneOutlined />}
onClick={handleClick}
disabled={disabled || isLoading}
style={style}
/>
</Tooltip>
<DraggableVoiceCallWindow
visible={isWindowVisible}
onClose={() => {
setIsWindowVisible(false)
// Note: dispatch is intentionally not called here; DraggableVoiceCallWindow handles it
}}
position={windowPosition}
onPositionChange={setWindowPosition}
/>
</>
)
}
export default VoiceCallButton

View File

@ -0,0 +1,321 @@
import {
AudioMutedOutlined,
AudioOutlined,
CloseOutlined,
PauseCircleOutlined,
PlayCircleOutlined,
SoundOutlined
} from '@ant-design/icons'
import { Button, Modal, Space, Tooltip } from 'antd'
import React, { useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import { VoiceCallService } from '../services/VoiceCallService'
import VoiceVisualizer from './VoiceVisualizer'
interface Props {
visible: boolean
onClose: () => void
}
const VoiceCallModal: React.FC<Props> = ({ visible, onClose }) => {
const { t } = useTranslation()
const [isMuted, setIsMuted] = useState(false)
const [isPaused, setIsPaused] = useState(false)
const [transcript, setTranscript] = useState('')
const [response, setResponse] = useState('')
const [isListening, setIsListening] = useState(false)
const [isSpeaking, setIsSpeaking] = useState(false)
const [isRecording, setIsRecording] = useState(false)
const [isProcessing, setIsProcessing] = useState(false)
// Wrap handleClose in useCallback so the useEffect dependency stays stable
const handleClose = React.useCallback(() => {
VoiceCallService.endCall()
onClose()
}, [onClose])
useEffect(() => {
const startVoiceCall = async () => {
try {
// Show a loading indicator
window.message.loading({ content: t('voice_call.initializing'), key: 'voice-call-init' })
// Pre-initialize the speech recognition service
try {
await VoiceCallService.initialize()
} catch (initError) {
console.warn('Speech recognition service initialization warning:', initError)
// Do not rethrow; let the app continue running
}
// Start the voice call
await VoiceCallService.startCall({
onTranscript: (text) => setTranscript(text),
onResponse: (text) => setResponse(text),
onListeningStateChange: setIsListening,
onSpeakingStateChange: setIsSpeaking
})
// Dismiss the loading indicator
window.message.success({ content: t('voice_call.ready'), key: 'voice-call-init' })
} catch (error) {
console.error('Voice call error:', error)
window.message.error({ content: t('voice_call.error'), key: 'voice-call-init' })
handleClose()
}
}
// Handler for TTS state change events
const handleTTSStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
console.log('TTS state change event:', isPlaying)
setIsSpeaking(isPlaying)
}
if (visible) {
startVoiceCall()
// Register the listener
window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
return () => {
VoiceCallService.endCall()
// Remove the listener
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
}, [visible, t, handleClose])
const toggleMute = () => {
const newMuteState = !isMuted
setIsMuted(newMuteState)
VoiceCallService.setMuted(newMuteState)
}
const togglePause = () => {
const newPauseState = !isPaused
setIsPaused(newPauseState)
VoiceCallService.setPaused(newPauseState)
}
// Press-and-hold talk handlers
const handleRecordStart = async (e: React.MouseEvent | React.TouchEvent) => {
e.preventDefault() // prevent the default touch behavior
if (isProcessing || isPaused) return
// Clear the previous recognition result first
setTranscript('')
// Force-stop TTS regardless of whether it is playing
VoiceCallService.stopTTS()
setIsSpeaking(false)
// Update UI state
setIsRecording(true)
setIsProcessing(true) // mark as processing to prevent repeated clicks
// Start recording
try {
await VoiceCallService.startRecording()
console.log('Recording started')
setIsProcessing(false) // clear the processing state once recording has started
} catch (error) {
console.error('Error starting recording:', error)
window.message.error({ content: 'Failed to start speech recognition. Please make sure the speech recognition service is running.', key: 'voice-call-error' })
setIsRecording(false)
setIsProcessing(false)
}
}
const handleRecordEnd = async (e: React.MouseEvent | React.TouchEvent) => {
e.preventDefault() // prevent the default touch behavior
if (!isRecording) return
// Update UI state immediately
setIsRecording(false)
setIsProcessing(true)
// Force-stop TTS regardless of whether it is playing
VoiceCallService.stopTTS()
setIsSpeaking(false)
// Make sure recording has fully stopped
try {
await VoiceCallService.stopRecording()
console.log('Recording stopped')
} catch (error) {
console.error('Error stopping recording:', error)
} finally {
// Regardless of the outcome, reset the processing state after a delay
setTimeout(() => {
setIsProcessing(false)
}, 1000) // longer delay so there is enough time to process the result
}
}
// Handle the pointer or touch leaving the button
const handleRecordCancel = async (e: React.MouseEvent | React.TouchEvent) => {
e.preventDefault()
if (isRecording) {
// Update UI state immediately
setIsRecording(false)
setIsProcessing(true)
// Force-stop TTS regardless of whether it is playing
VoiceCallService.stopTTS()
setIsSpeaking(false)
// Cancel the recording without sending it to the AI
try {
await VoiceCallService.cancelRecording()
console.log('Recording cancelled')
// Clear the input text
setTranscript('')
} catch (error) {
console.error('Error cancelling recording:', error)
} finally {
// Regardless of the outcome, reset the processing state after a delay
setTimeout(() => {
setIsProcessing(false)
}, 1000)
}
}
}
return (
<Modal
title={t('voice_call.title')}
open={visible}
onCancel={handleClose}
footer={null}
width={500}
centered
maskClosable={false}>
<Container>
<VisualizerContainer>
<VoiceVisualizer isActive={isListening || isRecording} type="input" />
<VoiceVisualizer isActive={isSpeaking} type="output" />
</VisualizerContainer>
<TranscriptContainer>
{transcript && (
<TranscriptText>
<UserLabel>{t('voice_call.you')}:</UserLabel> {transcript}
</TranscriptText>
)}
{response && (
<ResponseText>
<AILabel>{t('voice_call.ai')}:</AILabel> {response}
</ResponseText>
)}
</TranscriptContainer>
<ControlsContainer>
<Space>
<Button
type="text"
icon={isMuted ? <AudioMutedOutlined /> : <AudioOutlined />}
onClick={toggleMute}
size="large"
title={isMuted ? t('voice_call.unmute') : t('voice_call.mute')}
/>
<Button
type="text"
icon={isPaused ? <PlayCircleOutlined /> : <PauseCircleOutlined />}
onClick={togglePause}
size="large"
title={isPaused ? t('voice_call.resume') : t('voice_call.pause')}
/>
<Tooltip title={t('voice_call.press_to_talk')}>
<RecordButton
type={isRecording ? 'primary' : 'default'}
icon={<SoundOutlined />}
onMouseDown={handleRecordStart}
onMouseUp={handleRecordEnd}
onMouseLeave={handleRecordCancel}
onTouchStart={handleRecordStart}
onTouchEnd={handleRecordEnd}
onTouchCancel={handleRecordCancel}
size="large"
disabled={isProcessing || isPaused}>
{isRecording ? t('voice_call.release_to_send') : t('voice_call.press_to_talk')}
</RecordButton>
</Tooltip>
<Button
type="primary"
icon={<CloseOutlined />}
onClick={handleClose}
danger
size="large"
title={t('voice_call.end')}
/>
</Space>
</ControlsContainer>
</Container>
</Modal>
)
}
const Container = styled.div`
display: flex;
flex-direction: column;
gap: 20px;
height: 400px;
`
const VisualizerContainer = styled.div`
display: flex;
justify-content: space-between;
height: 100px;
`
const TranscriptContainer = styled.div`
flex: 1;
overflow-y: auto;
border: 1px solid var(--color-border);
border-radius: 8px;
padding: 16px;
background-color: var(--color-background-2);
`
const TranscriptText = styled.p`
margin-bottom: 8px;
color: var(--color-text-1);
`
const ResponseText = styled.p`
margin-bottom: 8px;
color: var(--color-primary);
`
const UserLabel = styled.span`
font-weight: bold;
color: var(--color-text-1);
`
const AILabel = styled.span`
font-weight: bold;
color: var(--color-primary);
`
const ControlsContainer = styled.div`
display: flex;
justify-content: center;
padding: 10px 0;
`
const RecordButton = styled(Button)`
min-width: 150px;
transition: all 0.2s;
&:active {
transform: scale(0.95);
}
`
export default VoiceCallModal

View File

@ -0,0 +1,93 @@
import React, { useEffect, useRef } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface Props {
isActive: boolean
type: 'input' | 'output'
}
const VoiceVisualizer: React.FC<Props> = ({ isActive, type }) => {
const { t } = useTranslation()
const canvasRef = useRef<HTMLCanvasElement>(null)
const animationRef = useRef<number | undefined>(undefined)
useEffect(() => {
const canvas = canvasRef.current
if (!canvas) return
const ctx = canvas.getContext('2d')
if (!ctx) return
const width = canvas.width
const height = canvas.height
const drawVisualizer = () => {
ctx.clearRect(0, 0, width, height)
if (!isActive) {
// Draw a static flat line
ctx.beginPath()
ctx.moveTo(0, height / 2)
ctx.lineTo(width, height / 2)
ctx.strokeStyle = type === 'input' ? 'var(--color-text-2)' : 'var(--color-primary)'
ctx.lineWidth = 2
ctx.stroke()
return
}
// Draw animated bars
const barCount = 30
const barWidth = width / barCount
const color = type === 'input' ? 'var(--color-text-1)' : 'var(--color-primary)'
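// Note: bar heights are randomized purely for decorative effect; the visualizer
// is not driven by real audio amplitude data.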
for (let i = 0; i < barCount; i++) {
const barHeight = Math.random() * (height / 2) + 10
const x = i * barWidth
const y = height / 2 - barHeight / 2
ctx.fillStyle = color
ctx.fillRect(x, y, barWidth - 2, barHeight)
}
animationRef.current = requestAnimationFrame(drawVisualizer)
}
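// The loop self-schedules via requestAnimationFrame only while active; when
// inactive, a single static line is drawn and no further frame is queued.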
drawVisualizer()
return () => {
if (animationRef.current) {
cancelAnimationFrame(animationRef.current)
}
}
}, [isActive, type])
return (
<Container $type={type}>
<Label>{type === 'input' ? t('voice_call.you') : t('voice_call.ai')}</Label>
<Canvas ref={canvasRef} width={200} height={50} />
</Container>
)
}
const Container = styled.div<{ $type: 'input' | 'output' }>`
display: flex;
flex-direction: column;
align-items: center;
width: 45%;
border-radius: 8px;
padding: 10px;
background-color: ${(props) => (props.$type === 'input' ? 'var(--color-background-3)' : 'var(--color-primary-bg)')};
`
const Label = styled.div`
margin-bottom: 8px;
font-weight: bold;
`
const Canvas = styled.canvas`
width: 100%;
height: 50px;
`
export default VoiceVisualizer

View File

@ -1,5 +1,66 @@
import i18n from '@renderer/i18n'
import dayjs from 'dayjs'
// Voice call prompts (multi-language support)
export const VOICE_CALL_PROMPTS: Record<string, string> = {
'zh-CN': `当前是语音通话模式。请注意:
1. 简洁直接地回答问题,避免冗长的开场白和总结。
2. 避免使用表格、代码块、Markdown等复杂格式内容。
3. 使用自然、口语化的语言,就像与人交谈一样。
4. 如需列举要点,请使用简单的数字或文字标记,而不是复杂格式。
5. 回答应简短有力,便于用户通过语音理解。
6. 避免使用特殊符号、表情符号、标点符号等,以免影响语音播放时的理解。
7. 使用完整的句子,而不是简单的关键词列表。
8. 尽量使用常用词汇,除非用户特别询问,否则避免使用生僻或专业术语。`,
'en-US': `This is voice call mode. Please note:
1. Answer questions concisely and directly, avoiding lengthy introductions and summaries.
2. Avoid complex formatted content such as tables, code blocks, Markdown, etc.
3. Use natural, conversational language as if speaking to a person.
4. If you need to list points, use simple numbers or text markers rather than complex formats.
5. Responses should be brief and powerful, easy for users to understand through voice.
6. Avoid special symbols, emojis, punctuation marks, etc., as these can affect comprehension during voice playback.
7. Use complete sentences rather than simple keyword lists.
8. Try to use common vocabulary, avoiding obscure or technical terms unless specifically asked by the user.`,
'zh-TW': `當前是語音通話模式。請注意:
1. 簡潔直接地回答問題,避免冗長的開場白和總結。
2. 避免使用表格、程式碼區塊、Markdown等複雜格式內容。
3. 使用自然、口語化的語言,就像與人交談一樣。
4. 如需列舉要點,請使用簡單的數字或文字標記,而不是複雜格式。
5. 回答應簡短有力,便於用戶透過語音理解。
6. 避免使用特殊符號、表情符號、標點符號等,以免影響語音播放時的理解。
7. 使用完整的句子,而不是簡單的關鍵詞列表。
8. 盡量使用常用詞彙,除非用戶特別詢問,否則避免使用生僻或專業術語。`,
'ja-JP': `これは音声通話モードです。ご注意ください:
1. 質問には簡潔かつ直接的に答え、長い前置きやまとめを避けてください
2. 表、コードブロック、Markdownなどの複雑な書式付きコンテンツを避けてください
3. 人と話すように、自然で口語的な言葉を使ってください
4. 箇条書きが必要な場合は、複雑な書式ではなく、簡単な数字やテキストマーカーを使ってください
5. 回答は簡潔で力強く、音声で理解しやすいものにしてください
6. 特殊記号、絵文字、句読点などは音声再生時の理解を妨げるため、使用を避けてください
7. 単純なキーワードの羅列ではなく、完全な文を使ってください
8. ユーザーが特に尋ねない限り、難解な語や専門用語を避け、できるだけ一般的な語彙を使ってください`,
'ru-RU': `Это режим голосового вызова. Обратите внимание:
1. Отвечайте на вопросы кратко и прямо, избегая длинных введений и резюме.
2. Избегайте сложного форматированного содержания, такого как таблицы, блоки кода, Markdown и т.д.
3. Используйте естественный, разговорный язык, как при разговоре с человеком.
4. Если вам нужно перечислить пункты, используйте простые цифры или текстовые маркеры, а не сложные форматы.
5. Ответы должны быть краткими и содержательными, легкими для понимания пользователем через голос.
6. Избегайте специальных символов, эмодзи, знаков препинания и т.д., так как они могут затруднить понимание при воспроизведении голосом.
7. Используйте полные предложения, а не простые списки ключевых слов.
8. Старайтесь использовать общеупотребительную лексику, избегая малоизвестных или технических терминов, если пользователь специально не спрашивает о них.`
// More languages can be added here...
}
// Get the default voice call prompt for the current language
export function getDefaultVoiceCallPrompt(): string {
const language = i18n.language || 'en-US'
// Fall back to the English prompt when there is none for the current language
return VOICE_CALL_PROMPTS[language] || VOICE_CALL_PROMPTS['en-US']
}
// The original constant is kept for backward compatibility
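// Note: this constant is evaluated once at module load, so a runtime language
// change will not update it; prefer calling getDefaultVoiceCallPrompt() directly.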
export const DEFAULT_VOICE_CALL_PROMPT = getDefaultVoiceCallPrompt()
export const AGENT_PROMPT = `
You are a Prompt Generator. You will integrate user input information into a structured Prompt using Markdown syntax. Please do not use code blocks for output, display directly!

View File

@ -3,6 +3,7 @@ import { isLocalAi } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import db from '@renderer/databases'
import i18n from '@renderer/i18n'
import ASRServerService from '@renderer/services/ASRServerService'
import { useAppDispatch } from '@renderer/store'
import { setAvatar, setFilesPath, setResourcesPath, setUpdateState } from '@renderer/store/runtime'
import { delay, runAsyncFunction } from '@renderer/utils'
@ -19,7 +20,18 @@ import useUpdateHandler from './useUpdateHandler'
export function useAppInit() {
const dispatch = useAppDispatch()
const { proxyUrl, language, windowStyle, autoCheckUpdate, proxyMode, customCss, enableDataCollection } = useSettings()
const {
proxyUrl,
language,
windowStyle,
autoCheckUpdate,
proxyMode,
customCss,
enableDataCollection,
asrEnabled,
asrServiceType,
asrAutoStartServer
} = useSettings()
const { minappShow } = useRuntime()
const { setDefaultModel, setTopicNamingModel, setTranslateModel } = useDefaultModel()
const avatar = useLiveQuery(() => db.settings.get('image://avatar'))
@ -108,4 +120,18 @@ export function useAppInit() {
useEffect(() => {
enableDataCollection ? initAnalytics() : disableAnalytics()
}, [enableDataCollection])
// Auto-start the ASR server
useEffect(() => {
if (asrEnabled && asrServiceType === 'local' && asrAutoStartServer) {
console.log('Auto-starting ASR server...')
ASRServerService.startServer().then((success) => {
if (success) {
console.log('ASR server auto-start succeeded')
} else {
console.error('ASR server auto-start failed')
}
})
}
}, [asrEnabled, asrServiceType, asrAutoStartServer])
}

View File

@ -1,5 +1,27 @@
{
"translation": {
"voice_call": {
"title": "Voice Call",
"start": "Start Voice Call",
"end": "End Call",
"mute": "Mute",
"unmute": "Unmute",
"pause": "Pause",
"resume": "Resume",
"you": "You",
"ai": "AI",
"press_to_talk": "Press to Talk",
"release_to_send": "Release to Send",
"initialization_failed": "Failed to initialize voice call",
"error": "Voice call error",
"initializing": "Initializing voice call...",
"ready": "Voice call ready",
"shortcut_key_setting": "Voice Recognition Shortcut Key Settings",
"press_any_key": "Press any key...",
"save": "Save",
"cancel": "Cancel",
"shortcut_key_tip": "Press this shortcut key to start recording, release to end recording and send"
},
"agents": {
"add.button": "Add to Assistant",
"add.knowledge_base": "Knowledge Base",
@ -104,6 +126,13 @@
"default.description": "Hello, I'm Default Assistant. You can start chatting with me right away",
"default.name": "Default Assistant",
"default.topic.name": "Default Topic",
"tts": {
"play": "Play speech",
"stop": "Stop playback",
"speak": "Play speech",
"stop_global": "Stop all speech playback",
"stopped": "Speech playback stopped"
},
"history": {
"assistant_node": "Assistant",
"click_to_navigate": "Click to navigate to the message",
@ -1514,6 +1543,171 @@
"privacy": {
"title": "Privacy Settings",
"enable_privacy_mode": "Anonymous reporting of errors and statistics"
},
"tts": {
"title": "Text-to-Speech Settings",
"enable": "Enable Text-to-Speech",
"enable.help": "Enable to convert text to speech",
"reset": "Reset",
"reset_title": "Reset Custom Voices and Models",
"reset_confirm": "Are you sure you want to reset all custom voices and models? This will delete all custom items you've added.",
"reset_success": "Reset successful",
"reset_help": "If voices or models display abnormally, try resetting all custom items",
"api_settings": "API Settings",
"service_type": "Service Type",
"service_type.openai": "OpenAI",
"service_type.edge": "Browser TTS",
"service_type.siliconflow": "SiliconFlow",
"service_type.refresh": "Refresh TTS service type settings",
"service_type.refreshed": "TTS service type settings refreshed",
"siliconflow_api_key": "SiliconFlow API Key",
"siliconflow_api_key.placeholder": "Enter SiliconFlow API key",
"siliconflow_api_url": "SiliconFlow API URL",
"siliconflow_api_url.placeholder": "Example: https://api.siliconflow.cn/v1/audio/speech",
"siliconflow_voice": "SiliconFlow Voice",
"siliconflow_voice.placeholder": "Select a voice",
"siliconflow_model": "SiliconFlow Model",
"siliconflow_model.placeholder": "Select a model",
"siliconflow_response_format": "Response Format",
"siliconflow_response_format.placeholder": "Default is mp3",
"siliconflow_speed": "Speech Speed",
"siliconflow_speed.placeholder": "Default is 1.0",
"api_key": "API Key",
"api_key.placeholder": "Enter OpenAI API key",
"api_url": "API URL",
"api_url.placeholder": "Example: https://api.openai.com/v1/audio/speech",
"edge_voice": "Edge TTS Voice",
"edge_voice.loading": "Loading...",
"edge_voice.refresh": "Refresh available voices",
"edge_voice.not_found": "No matching voices found",
"voice": "Voice",
"voice.placeholder": "Select a voice",
"voice_input_placeholder": "Enter voice",
"voice_add": "Add",
"voice_empty": "No custom voices yet, please add below",
"model": "Model",
"model.placeholder": "Select a model",
"model_input_placeholder": "Enter model",
"model_add": "Add",
"model_empty": "No custom models yet, please add below",
"filter_options": "Filter Options",
"filter.thinking_process": "Filter thinking process",
"filter.markdown": "Filter Markdown",
"filter.code_blocks": "Filter code blocks",
"filter.html_tags": "Filter HTML tags",
"filter.emojis": "Filter emojis",
"max_text_length": "Maximum text length",
"show_progress_bar": "Show TTS progress bar",
"test": "Test Speech",
"help": "Text-to-speech functionality supports converting text to natural-sounding speech.",
"learn_more": "Learn more",
"tab_title": "Text-to-Speech",
"play": "Play speech",
"stop": "Stop playback",
"speak": "Play speech",
"stop_global": "Stop all speech playback",
"stopped": "Speech playback stopped",
"segmented": "Segmented Playback",
"segmented_play": "Segmented Playback",
"segmented_playback": "Segmented Playback",
"error": {
"not_enabled": "Text-to-speech feature is not enabled",
"no_api_key": "API key is not set",
"no_voice": "Voice is not selected",
"no_model": "Model is not selected",
"no_edge_voice": "Browser TTS voice is not selected",
"browser_not_support": "Browser does not support speech synthesis",
"synthesis_failed": "Speech synthesis failed",
"play_failed": "Speech playback failed",
"empty_text": "Text is empty",
"general": "An error occurred during speech synthesis",
"unsupported_service_type": "Unsupported service type: {{serviceType}}"
},
"service_type.mstts": "Free Online TTS",
"edge_voice.available_count": "Available voices: {{count}}",
"edge_voice.refreshing": "Refreshing voice list...",
"edge_voice.refreshed": "Voice list refreshed",
"mstts.voice": "Free Online TTS Voice",
"mstts.output_format": "Output Format",
"mstts.info": "Free Online TTS service doesn't require an API key, completely free to use.",
"error.no_mstts_voice": "Free Online TTS voice not set"
},
"asr": {
"title": "Speech Recognition",
"tab_title": "Speech Recognition",
"enable": "Enable Speech Recognition",
"enable.help": "Enable to convert speech to text",
"service_type": "Service Type",
"service_type.browser": "Browser",
"service_type.local": "Local Server",
"api_key": "API Key",
"api_key.placeholder": "Enter OpenAI API key",
"api_url": "API URL",
"api_url.placeholder": "Example: https://api.openai.com/v1/audio/transcriptions",
"model": "Model",
"browser.info": "Use the browser's built-in speech recognition feature, no additional setup required",
"local.info": "Use local server and browser for speech recognition, need to start the server and open the browser page first",
"local.browser_tip": "Please open this page in your browser and keep the browser window open",
"local.test_connection": "Test Connection",
"local.connection_success": "Connection successful",
"local.connection_failed": "Connection failed, please make sure the server is running",
"server.start": "Start Server",
"server.stop": "Stop Server",
"server.starting": "Starting server...",
"server.started": "Server started",
"server.stopping": "Stopping server...",
"server.stopped": "Server stopped",
"server.already_running": "Server is already running",
"server.not_running": "Server is not running",
"server.start_failed": "Failed to start server",
"server.stop_failed": "Failed to stop server",
"open_browser": "Open Browser Page",
"test": "Test Speech Recognition",
"test_info": "Please use the speech recognition button in the input box to test",
"start": "Start Recording",
"stop": "Stop Recording",
"preparing": "Preparing",
"recording": "Recording...",
"processing": "Processing speech...",
"success": "Speech recognition successful",
"completed": "Speech recognition completed",
"canceled": "Recording canceled",
"error": {
"not_enabled": "Speech recognition is not enabled",
"start_failed": "Failed to start recording",
"transcribe_failed": "Failed to transcribe speech",
"no_api_key": "API key is not set",
"browser_not_support": "Browser does not support speech recognition"
},
"auto_start_server": "Automatically start server when launching the application",
"auto_start_server.help": "When enabled, the speech recognition server will automatically start when the application launches"
},
"voice": {
"title": "Voice Features",
"help": "Voice features include Text-to-Speech (TTS), Automatic Speech Recognition (ASR), and Voice Call.",
"learn_more": "Learn More"
},
"voice_call": {
"tab_title": "Voice Call",
"enable": "Enable Voice Call",
"enable.help": "Enable to use voice call feature to talk with AI",
"model": "Call Model",
"model.select": "Select Model",
"model.current": "Current Model: {{model}}",
"model.info": "Select the AI model for voice calls. Different models may provide different voice interaction experiences",
"welcome_message": "Hello, I'm your AI assistant. Please press and hold the talk button to start a conversation.",
"prompt": {
"label": "Voice Call Prompt",
"placeholder": "Enter voice call prompt",
"save": "Save",
"reset": "Reset",
"saved": "Prompt saved",
"reset_done": "Prompt reset",
"info": "This prompt will guide the AI's responses in voice call mode"
},
"asr_tts_info": "Voice call uses the Speech Recognition (ASR) and Text-to-Speech (TTS) settings above",
"test": "Test Voice Call",
"test_info": "Please use the voice call button on the right side of the input box to test"
}
},
"translate": {

View File

@ -104,6 +104,13 @@
"default.description": "こんにちは、私はデフォルトのアシスタントです。すぐにチャットを始められます。",
"default.name": "デフォルトアシスタント",
"default.topic.name": "デフォルトトピック",
"tts": {
"play": "音声を再生",
"stop": "再生を停止",
"speak": "音声を再生",
"stop_global": "すべての音声再生を停止",
"stopped": "音声再生を停止しました"
},
"history": {
"assistant_node": "アシスタント",
"click_to_navigate": "メッセージに移動",
@ -1388,6 +1395,7 @@
"title": "プライバシー設定",
"enable_privacy_mode": "匿名エラーレポートとデータ統計の送信"
},
"memory": {
"title": "メモリー機能",
"description": "AIアシスタントの長期メモリーを管理し、会話を自動分析して重要な情報を抽出します",
@ -1445,6 +1453,172 @@
"totalAnalyses": "分析回数合計",
"successRate": "成功率",
"avgAnalysisTime": "平均分析時間"
},
"tts": {
"title": "音声合成設定",
"enable": "音声合成を有効にする",
"enable.help": "テキストを音声に変換する機能を有効にします",
"reset": "リセット",
"reset_title": "カスタム音声とモデルをリセット",
"reset_confirm": "すべてのカスタム音声とモデルをリセットしますか?追加したすべてのカスタム項目が削除されます。",
"reset_success": "リセットに成功しました",
"reset_help": "音声やモデルの表示に異常がある場合は、すべてのカスタム項目をリセットしてみてください",
"api_settings": "API設定",
"service_type": "サービスタイプ",
"service_type.openai": "OpenAI",
"service_type.edge": "ブラウザ TTS",
"test": "テスト",
"error": {
"not_enabled": "音声合成が有効になっていません",
"no_edge_voice": "ブラウザ TTSの音声が選択されていません",
"no_api_key": "APIキーが設定されていません",
"browser_not_support": "ブラウザが音声合成をサポートしていません",
"no_voice": "音声が選択されていません",
"no_model": "モデルが選択されていません",
"synthesis_failed": "音声合成に失敗しました",
"play_failed": "音声再生に失敗しました",
"empty_text": "テキストが空です",
"general": "音声合成エラーが発生しました",
"unsupported_service_type": "サポートされていないサービスタイプ: {{serviceType}}"
},
"help": "OpenAIのTTS APIを使用するには、APIキーが必要です。ブラウザ TTSはブラウザの機能を使用するため、APIキーは不要です。",
"learn_more": "詳細はこちら",
"tab_title": "音声合成",
"service_type.refresh": "TTS サービスタイプ設定を更新",
"service_type.refreshed": "TTS サービスタイプ設定が更新されました",
"api_key": "API キー",
"api_key.placeholder": "OpenAI API キーを入力してください",
"api_url": "API アドレス",
"api_url.placeholder": "例https://api.openai.com/v1/audio/speech",
"edge_voice": "ブラウザ TTS 音声",
"edge_voice.loading": "読み込み中...",
"edge_voice.refresh": "利用可能な音声リストを更新",
"edge_voice.not_found": "一致する音声が見つかりません",
"voice": "音声",
"voice.placeholder": "音声を選択してください",
"voice_input_placeholder": "音声を入力",
"voice_add": "追加",
"voice_empty": "カスタム音声がありません。下に追加してください",
"model": "モデル",
"model.placeholder": "モデルを選択してください",
"model_input_placeholder": "モデルを入力",
"model_add": "追加",
"model_empty": "カスタムモデルがありません。下に追加してください",
"filter_options": "フィルターオプション",
"filter.thinking_process": "思考プロセスをフィルター",
"filter.markdown": "Markdownタグをフィルター",
"filter.code_blocks": "コードブロックをフィルター",
"filter.html_tags": "HTMLタグをフィルター",
"max_text_length": "最大テキスト長",
"service_type.siliconflow": "シリコンフロー",
"service_type.mstts": "無料オンライン TTS",
"siliconflow_api_key": "シリコンフロー API キー",
"siliconflow_api_key.placeholder": "シリコンフロー API キーを入力してください",
"siliconflow_api_url": "シリコンフロー API アドレス",
"siliconflow_api_url.placeholder": "例https://api.siliconflow.cn/v1/audio/speech",
"siliconflow_voice": "シリコンフロー音声",
"siliconflow_voice.placeholder": "音声を選択してください",
"siliconflow_model": "シリコンフローモデル",
"siliconflow_model.placeholder": "モデルを選択してください",
"siliconflow_response_format": "レスポンス形式",
"siliconflow_response_format.placeholder": "デフォルトはmp3",
"siliconflow_speed": "話す速度",
"siliconflow_speed.placeholder": "デフォルトは1.0",
"edge_voice.available_count": "利用可能な音声: {{count}}個",
"edge_voice.refreshing": "音声リストを更新中...",
"edge_voice.refreshed": "音声リストが更新されました",
"mstts.voice": "無料オンライン TTS 音声",
"mstts.output_format": "出力形式",
"mstts.info": "無料オンラインTTSサービスはAPIキーが不要で、完全に無料で使用できます。",
"error.no_mstts_voice": "無料オンライン TTS 音声が設定されていません",
"play": "音声を再生",
"stop": "再生を停止",
"speak": "音声を再生",
"stop_global": "すべての音声再生を停止",
"stopped": "音声再生を停止しました",
"segmented": "分割",
"segmented_play": "分割再生",
"segmented_playback": "分割再生",
"filter.emojis": "絵文字をフィルター",
"show_progress_bar": "TTS進行バーを表示"
},
"asr": {
"title": "音声認識",
"tab_title": "音声認識",
"enable": "音声認識を有効にする",
"enable.help": "音声をテキストに変換する機能を有効にします",
"service_type": "サービスタイプ",
"service_type.browser": "ブラウザ",
"service_type.local": "ローカルサーバー",
"api_key": "APIキー",
"api_key.placeholder": "OpenAI APIキーを入力",
"api_url": "API URL",
"api_url.placeholder": "例https://api.openai.com/v1/audio/transcriptions",
"model": "モデル",
"browser.info": "ブラウザの内蔵音声認識機能を使用します。追加設定は不要です",
"local.info": "ローカルサーバーとブラウザを使用して音声認識を行います。サーバーを起動してブラウザページを開く必要があります",
"local.browser_tip": "このページをブラウザで開き、ブラウザウィンドウを開いたままにしてください",
"local.test_connection": "接続テスト",
"local.connection_success": "接続成功",
"local.connection_failed": "接続失敗。サーバーが起動していることを確認してください",
"server.start": "サーバー起動",
"server.stop": "サーバー停止",
"server.starting": "サーバーを起動中...",
"server.started": "サーバーが起動しました",
"server.stopping": "サーバーを停止中...",
"server.stopped": "サーバーが停止しました",
"server.already_running": "サーバーは既に実行中です",
"server.not_running": "サーバーは実行されていません",
"server.start_failed": "サーバーの起動に失敗しました",
"server.stop_failed": "サーバーの停止に失敗しました",
"open_browser": "ブラウザページを開く",
"test": "音声認識テスト",
"test_info": "入力ボックスの音声認識ボタンを使用してテストしてください",
"start": "録音開始",
"stop": "録音停止",
"preparing": "準備中",
"recording": "録音中...",
"processing": "音声処理中...",
"success": "音声認識成功",
"completed": "音声認識完了",
"canceled": "録音キャンセル",
"error": {
"not_enabled": "音声認識が有効になっていません",
"start_failed": "録音の開始に失敗しました",
"transcribe_failed": "音声の文字起こしに失敗しました",
"no_api_key": "APIキーが設定されていません",
"browser_not_support": "ブラウザが音声認識をサポートしていません"
},
"auto_start_server": "アプリ起動時にサーバーを自動起動",
"auto_start_server.help": "有効にすると、アプリ起動時に音声認識サーバーが自動的に起動します"
},
"voice": {
"title": "音声機能",
"help": "音声機能にはテキスト読み上げ(TTS)と音声認識(ASR)が含まれます。",
"learn_more": "詳細を見る"
},
"voice_call": {
"tab_title": "通話機能",
"enable": "音声通話を有効にする",
"enable.help": "有効にすると、音声通話機能を使用してAIと対話できます",
"model": "通話モデル",
"model.select": "モデルを選択",
"model.current": "現在のモデル: {{model}}",
"model.info": "音声通話用のAIモデルを選択します。モデルによって音声対話の体験が異なる場合があります",
"welcome_message": "こんにちは、AIアシスタントです。会話を始めるには、ボタンを長押ししてください。",
"prompt": {
"label": "音声通話プロンプト",
"placeholder": "音声通話プロンプトを入力",
"save": "保存",
"reset": "リセット",
"saved": "プロンプトが保存されました",
"reset_done": "プロンプトがリセットされました",
"info": "このプロンプトは音声通話モードでのAIの応答方法を指導します"
},
"asr_tts_info": "音声通話は上記の音声認識(ASR)と音声合成(TTS)の設定を使用します",
"test": "音声通話テスト",
"test_info": "入力ボックスの右側にある音声通話ボタンを使用してテストしてください"
}
},
"translate": {
@ -1485,6 +1659,28 @@
"quit": "終了",
"show_window": "ウィンドウを表示",
"visualization": "可視化"
},
"voice_call": {
"title": "音声通話",
"start": "音声通話を開始",
"end": "通話を終了",
"mute": "ミュート",
"unmute": "ミュート解除",
"pause": "一時停止",
"resume": "再開",
"you": "あなた",
"ai": "AI",
"press_to_talk": "長押しして話す",
"release_to_send": "離すと送信",
"initialization_failed": "音声通話の初期化に失敗しました",
"error": "音声通話エラー",
"initializing": "音声通話を初期化中...",
"ready": "音声通話の準備が完了しました",
"shortcut_key_setting": "音声認識ショートカットキー設定",
"press_any_key": "任意のキーを押してください...",
"save": "保存",
"cancel": "キャンセル",
"shortcut_key_tip": "このショートカットキーを押すと録音が始まり、キーを離すと録音が終了して送信されます"
}
}
}

View File

@ -104,6 +104,13 @@
"default.description": "Привет, я Ассистент по умолчанию. Вы можете начать общаться со мной прямо сейчас",
"default.name": "Ассистент по умолчанию",
"default.topic.name": "Топик по умолчанию",
"tts": {
"play": "Воспроизвести речь",
"stop": "Остановить воспроизведение",
"speak": "Воспроизвести речь",
"stop_global": "Остановить все воспроизведение речи",
"stopped": "Воспроизведение речи остановлено"
},
"history": {
"assistant_node": "Ассистент",
"click_to_navigate": "Перейти к сообщению",
@ -324,6 +331,9 @@
"503": "Серверная ошибка. Пожалуйста, попробуйте позже",
"504": "Серверная ошибка. Пожалуйста, попробуйте позже"
},
"asr": {
"browser_not_support": "Браузер не поддерживает распознавание речи"
},
"model.exists": "Модель уже существует",
"no_api_key": "Ключ API не настроен",
"provider_disabled": "Провайдер моделей не включен",
@ -1388,6 +1398,7 @@
"title": "Настройки приватности",
"enable_privacy_mode": "Анонимная отправка отчетов об ошибках и статистики"
},
"memory": {
"title": "[to be translated]:记忆功能",
"description": "[to be translated]:管理AI助手的长期记忆自动分析对话并提取重要信息",
@ -1441,6 +1452,172 @@
"confirmDelete": "[to be translated]:确认删除",
"confirmDeleteContent": "[to be translated]:确定要删除这条短期记忆吗?",
"delete": "[to be translated]:删除"
},
"tts": {
"title": "Настройки преобразования текста в речь",
"enable": "Включить преобразование текста в речь",
"enable.help": "Включить функцию преобразования текста в речь",
"reset": "Сбросить",
"reset_title": "Сбросить пользовательские голоса и модели",
"reset_confirm": "Вы уверены, что хотите сбросить все пользовательские голоса и модели? Это удалит все добавленные вами пользовательские элементы.",
"reset_success": "Сброс выполнен успешно",
"reset_help": "Если голоса или модели отображаются некорректно, попробуйте сбросить все пользовательские элементы",
"api_settings": "Настройки API",
"service_type": "Тип сервиса",
"service_type.openai": "OpenAI",
"service_type.edge": "Edge TTS",
"test": "Тест",
"error": {
"not_enabled": "Преобразование текста в речь не включено",
"no_edge_voice": "Голос Edge TTS не выбран",
"no_api_key": "Ключ API не настроен",
"browser_not_support": "Браузер не поддерживает синтез речи",
"no_voice": "Голос не выбран",
"no_model": "Модель не выбрана",
"synthesis_failed": "Ошибка синтеза речи",
"play_failed": "Ошибка воспроизведения речи",
"empty_text": "Текст пуст",
"general": "Ошибка синтеза речи",
"unsupported_service_type": "Неподдерживаемый тип службы: {{serviceType}}"
},
"help": "Для использования API TTS OpenAI требуется ключ API. Edge TTS использует функции браузера и не требует ключа API.",
"learn_more": "Узнать больше",
"tab_title": "Синтез речи",
"service_type.refresh": "Обновить настройки типа службы TTS",
"service_type.refreshed": "Настройки типа службы TTS обновлены",
"api_key": "Ключ API",
"api_key.placeholder": "Пожалуйста, введите ключ API OpenAI",
"api_url": "URL-адрес API",
"api_url.placeholder": "Например: https://api.openai.com/v1/audio/speech",
"edge_voice": "Голос TTS браузера",
"edge_voice.loading": "Загрузка...",
"edge_voice.refresh": "Обновить список доступных голосов",
"edge_voice.not_found": "Не найдено подходящего голоса",
"voice": "Голос",
"voice.placeholder": "Пожалуйста, выберите голос",
"voice_input_placeholder": "Введите голос",
"voice_add": "Добавить",
"voice_empty": "Пользовательские голоса отсутствуют, пожалуйста, добавьте ниже",
"model": "Модель",
"model.placeholder": "Пожалуйста, выберите модель",
"model_input_placeholder": "Введите модель",
"model_add": "Добавить",
"model_empty": "Пользовательские модели отсутствуют, пожалуйста, добавьте ниже",
"filter_options": "Параметры фильтрации",
"filter.thinking_process": "Фильтровать процесс рассуждения",
"filter.markdown": "Фильтровать разметку Markdown",
"filter.code_blocks": "Фильтровать блоки кода",
"filter.html_tags": "Фильтровать HTML-теги",
"max_text_length": "Максимальная длина текста",
"service_type.siliconflow": "SiliconFlow",
"service_type.mstts": "Бесплатный онлайн TTS",
"siliconflow_api_key": "Ключ API SiliconFlow",
"siliconflow_api_key.placeholder": "Пожалуйста, введите ключ API SiliconFlow",
"siliconflow_api_url": "URL-адрес API SiliconFlow",
"siliconflow_api_url.placeholder": "Например: https://api.siliconflow.cn/v1/audio/speech",
"siliconflow_voice": "Голос SiliconFlow",
"siliconflow_voice.placeholder": "Пожалуйста, выберите голос",
"siliconflow_model": "Модель SiliconFlow",
"siliconflow_model.placeholder": "Пожалуйста, выберите модель",
"siliconflow_response_format": "Формат ответа",
"siliconflow_response_format.placeholder": "По умолчанию mp3",
"siliconflow_speed": "Скорость речи",
"siliconflow_speed.placeholder": "По умолчанию 1.0",
"edge_voice.available_count": "Доступные голоса: {{count}}",
"edge_voice.refreshing": "Обновление списка голосов...",
"edge_voice.refreshed": "Список голосов обновлен",
"mstts.voice": "Бесплатный онлайн голос TTS",
"mstts.output_format": "Формат вывода",
"mstts.info": "Бесплатная онлайн-служба TTS не требует ключа API и полностью бесплатна для использования.",
"error.no_mstts_voice": "Бесплатный онлайн голос TTS не установлен",
"play": "Воспроизвести речь",
"stop": "Остановить воспроизведение",
"speak": "Воспроизвести речь",
"stop_global": "Остановить все воспроизведения речи",
"stopped": "Воспроизведение речи остановлено",
"segmented": "Сегментация",
"segmented_play": "Сегментированное воспроизведение",
"segmented_playback": "Сегментированное воспроизведение",
"filter.emojis": "Фильтровать эмодзи",
"show_progress_bar": "Показать индикатор выполнения TTS"
},
"voice": {
"title": "Голосовые функции",
"help": "Голосовые функции включают преобразование текста в речь (TTS) и распознавание речи (ASR).",
"learn_more": "Узнать больше"
},
"asr": {
"title": "Распознавание речи",
"tab_title": "Распознавание речи",
"enable": "Включить распознавание речи",
"enable.help": "После включения можно преобразовывать речь в текст",
"service_type": "Тип службы",
"service_type.browser": "Браузер",
"service_type.local": "Локальный сервер",
"api_key": "Ключ API",
"api_key.placeholder": "Пожалуйста, введите ключ API OpenAI",
"api_url": "URL-адрес API",
"api_url.placeholder": "Например: https://api.openai.com/v1/audio/transcriptions",
"model": "Модель",
"browser.info": "Используйте встроенную функцию распознавания речи браузера, дополнительные настройки не требуются",
"local.info": "Используйте локальный сервер и браузер для распознавания речи, необходимо сначала запустить сервер и открыть страницу браузера",
"local.browser_tip": "Пожалуйста, откройте эту страницу в браузере и держите окно браузера открытым",
"local.test_connection": "Тестировать соединение",
"local.connection_success": "Соединение успешно",
"local.connection_failed": "Соединение не удалось, убедитесь, что сервер запущен",
"server.start": "Запустить сервер",
"server.stop": "Остановить сервер",
"server.starting": "Запуск сервера...",
"server.started": "Сервер запущен",
"server.stopping": "Остановка сервера...",
"server.stopped": "Сервер остановлен",
"server.already_running": "Сервер уже запущен",
"server.not_running": "Сервер не запущен",
"server.start_failed": "Не удалось запустить сервер",
"server.stop_failed": "Не удалось остановить сервер",
"open_browser": "Открыть страницу в браузере",
"test": "Тестировать распознавание речи",
"test_info": "Используйте кнопку распознавания речи в поле ввода для тестирования",
"start": "Начать запись",
"stop": "Остановить запись",
"preparing": "Подготовка",
"recording": "Запись...",
"processing": "Обработка речи...",
"success": "Распознавание речи успешно",
"completed": "Распознавание речи завершено",
"canceled": "Запись отменена",
"error": {
"not_enabled": "Функция распознавания речи не включена",
"no_api_key": "API ключ не настроен",
"browser_not_support": "Браузер не поддерживает распознавание речи",
"start_failed": "Не удалось начать запись",
"transcribe_failed": "Не удалось распознать речь"
},
"auto_start_server": "Автоматически запускать сервер при запуске приложения",
"auto_start_server.help": "После включения сервер распознавания речи будет автоматически запускаться при запуске приложения"
},
"voice_call": {
"tab_title": "Функция вызова",
"enable": "Включить голосовой вызов",
"enable.help": "После включения вы сможете использовать функцию голосового вызова для разговора с ИИ",
"model": "Модель вызова",
"model.select": "Выбрать модель",
"model.current": "Текущая модель: {{model}}",
"model.info": "Выберите модель ИИ для голосовых вызовов. Разные модели могут обеспечивать различный опыт голосового взаимодействия",
"prompt": {
"label": "Подсказка для голосового вызова",
"placeholder": "Введите подсказку для голосового вызова",
"save": "Сохранить",
"reset": "Сбросить",
"saved": "Подсказка сохранена",
"reset_done": "Подсказка сброшена",
"info": "Эта подсказка будет направлять ответы ИИ в режиме голосового вызова"
},
"asr_tts_info": "Голосовой вызов использует настройки распознавания речи (ASR) и синтеза речи (TTS), указанные выше",
"test": "Тестировать голосовой вызов",
"test_info": "Используйте кнопку голосового вызова справа от поля ввода для тестирования",
"welcome_message": "Здравствуйте, я ваш ИИ-ассистент. Пожалуйста, нажмите и удерживайте кнопку разговора для начала диалога."
}
},
"translate": {
@ -1481,6 +1658,28 @@
"quit": "Выйти",
"show_window": "Показать окно",
"visualization": "Визуализация"
},
"voice_call": {
"title": "Голосовой вызов",
"start": "Начать голосовой вызов",
"end": "Завершить вызов",
"mute": "Отключить звук",
"unmute": "Включить звук",
"pause": "Пауза",
"resume": "Продолжить",
"you": "Вы",
"ai": "ИИ",
"press_to_talk": "Нажмите и удерживайте для разговора",
"release_to_send": "Отпустите для отправки",
"initialization_failed": "Не удалось инициализировать голосовой вызов",
"error": "Ошибка голосового вызова",
"initializing": "Инициализация голосового вызова...",
"ready": "Голосовой вызов готов",
"shortcut_key_setting": "Настройки горячих клавиш для распознавания речи",
"press_any_key": "Нажмите любую клавишу...",
"save": "Сохранить",
"cancel": "Отмена",
"shortcut_key_tip": "Нажмите эту горячую клавишу, чтобы начать запись, отпустите, чтобы закончить запись и отправить"
}
}
}

View File

@ -1,5 +1,27 @@
{
"translation": {
"voice_call": {
"title": "语音通话",
"start": "开始语音通话",
"end": "结束通话",
"mute": "静音",
"unmute": "取消静音",
"pause": "暂停",
"resume": "继续",
"you": "您",
"ai": "AI",
"press_to_talk": "长按说话",
"release_to_send": "松开发送",
"initialization_failed": "初始化语音通话失败",
"error": "语音通话出错",
"initializing": "正在初始化语音通话...",
"ready": "语音通话已就绪",
"shortcut_key_setting": "语音识别快捷键设置",
"press_any_key": "请按任意键...",
"save": "保存",
"cancel": "取消",
"shortcut_key_tip": "按下此快捷键开始录音,松开快捷键结束录音并发送"
},
"agents": {
"add.button": "添加到助手",
"add.knowledge_base": "知识库",
@ -104,6 +126,13 @@
"default.description": "你好,我是默认助手。你可以立刻开始跟我聊天。",
"default.name": "默认助手",
"default.topic.name": "默认话题",
"tts": {
"play": "播放语音",
"stop": "停止播放",
"speak": "播放语音",
"stop_global": "停止所有语音播放",
"stopped": "已停止语音播放"
},
"history": {
"assistant_node": "助手",
"click_to_navigate": "点击跳转到对应消息",
@ -1575,6 +1604,171 @@
"privacy": {
"title": "隐私设置",
"enable_privacy_mode": "匿名发送错误报告和数据统计"
},
"voice": {
"title": "语音功能",
"help": "语音功能包括文本转语音(TTS)、语音识别(ASR)和语音通话。",
"learn_more": "了解更多"
},
"tts": {
"title": "语音合成",
"tab_title": "语音合成",
"enable": "启用语音合成",
"enable.help": "启用后可以将文本转换为语音",
"reset": "重置",
"reset_title": "重置自定义音色和模型",
"reset_confirm": "确定要重置所有自定义音色和模型吗?这将删除所有已添加的自定义项。",
"reset_success": "重置成功",
"reset_help": "如果音色或模型显示异常,可以尝试重置所有自定义项",
"api_settings": "API设置",
"service_type": "服务类型",
"service_type.openai": "OpenAI",
"service_type.edge": "浏览器 TTS",
"service_type.siliconflow": "硅基流动",
"service_type.mstts": "免费在线 TTS",
"service_type.refresh": "刷新TTS服务类型设置",
"service_type.refreshed": "已刷新TTS服务类型设置",
"siliconflow_api_key": "硅基流动API密钥",
"siliconflow_api_key.placeholder": "请输入硅基流动API密钥",
"siliconflow_api_url": "硅基流动API地址",
"siliconflow_api_url.placeholder": "例如https://api.siliconflow.cn/v1/audio/speech",
"siliconflow_voice": "硅基流动音色",
"siliconflow_voice.placeholder": "请选择音色",
"siliconflow_model": "硅基流动模型",
"siliconflow_model.placeholder": "请选择模型",
"siliconflow_response_format": "响应格式",
"siliconflow_response_format.placeholder": "默认为mp3",
"siliconflow_speed": "语速",
"siliconflow_speed.placeholder": "默认为1.0",
"api_key": "API密钥",
"api_key.placeholder": "请输入OpenAI API密钥",
"api_url": "API地址",
"api_url.placeholder": "例如https://api.openai.com/v1/audio/speech",
"edge_voice": "浏览器 TTS音色",
"edge_voice.loading": "加载中...",
"edge_voice.refresh": "刷新可用音色列表",
"edge_voice.not_found": "未找到匹配的音色",
"edge_voice.available_count": "可用语音: {{count}}个",
"edge_voice.refreshing": "正在刷新语音列表...",
"edge_voice.refreshed": "语音列表已刷新",
"mstts.voice": "免费在线 TTS音色",
"mstts.output_format": "输出格式",
"mstts.info": "免费在线TTS服务不需要API密钥完全免费使用。",
"error.no_mstts_voice": "未设置免费在线 TTS音色",
"voice": "音色",
"voice.placeholder": "请选择音色",
"voice_input_placeholder": "输入音色",
"voice_add": "添加",
"voice_empty": "暂无自定义音色,请在下方添加",
"model": "模型",
"model.placeholder": "请选择模型",
"model_input_placeholder": "输入模型",
"model_add": "添加",
"model_empty": "暂无自定义模型,请在下方添加",
"filter_options": "过滤选项",
"filter.thinking_process": "过滤思考过程",
"filter.markdown": "过滤Markdown标记",
"filter.code_blocks": "过滤代码块",
"filter.html_tags": "过滤HTML标签",
"filter.emojis": "过滤表情符号",
"max_text_length": "最大文本长度",
"show_progress_bar": "显示TTS进度条",
"test": "测试语音",
"help": "语音合成功能支持将文本转换为自然语音。",
"learn_more": "了解更多",
"play": "播放语音",
"stop": "停止播放",
"speak": "播放语音",
"stop_global": "停止所有语音播放",
"stopped": "已停止语音播放",
"segmented": "分段",
"segmented_play": "分段播放",
"segmented_playback": "分段播放",
"error": {
"not_enabled": "语音合成功能未启用",
"no_api_key": "未设置API密钥",
"no_voice": "未选择音色",
"no_model": "未选择模型",
"no_edge_voice": "未选择浏览器 TTS音色",
"browser_not_support": "浏览器不支持语音合成",
"synthesis_failed": "语音合成失败",
"play_failed": "语音播放失败",
"empty_text": "文本为空",
"general": "语音合成出现错误",
"unsupported_service_type": "不支持的服务类型: {{serviceType}}"
}
},
"asr": {
"title": "语音识别",
"tab_title": "语音识别",
"enable": "启用语音识别",
"enable.help": "启用后可以将语音转换为文本",
"service_type": "服务类型",
"service_type.browser": "浏览器",
"service_type.local": "本地服务器",
"api_key": "API密钥",
"api_key.placeholder": "请输入OpenAI API密钥",
"api_url": "API地址",
"api_url.placeholder": "例如https://api.openai.com/v1/audio/transcriptions",
"model": "模型",
"browser.info": "使用浏览器内置的语音识别功能,无需额外设置",
"local.info": "使用本地服务器和浏览器进行语音识别,需要先启动服务器并打开浏览器页面",
"local.browser_tip": "请在浏览器中打开此页面,并保持浏览器窗口打开",
"local.test_connection": "测试连接",
"local.connection_success": "连接成功",
"local.connection_failed": "连接失败,请确保服务器已启动",
"server.start": "启动服务器",
"server.stop": "停止服务器",
"server.starting": "正在启动服务器...",
"server.started": "服务器已启动",
"server.stopping": "正在停止服务器...",
"server.stopped": "服务器已停止",
"server.already_running": "服务器已经在运行中",
"server.not_running": "服务器未运行",
"server.start_failed": "启动服务器失败",
"server.stop_failed": "停止服务器失败",
"open_browser": "打开浏览器页面",
"test": "测试语音识别",
"test_info": "请在输入框中使用语音识别按钮进行测试",
"start": "开始录音",
"stop": "停止录音",
"preparing": "准备中",
"recording": "正在录音...",
"processing": "正在处理语音...",
"success": "语音识别成功",
"completed": "语音识别完成",
"canceled": "已取消录音",
"auto_start_server": "启动应用自动开启服务器",
"auto_start_server.help": "启用后,应用启动时会自动开启语音识别服务器",
"error": {
"not_enabled": "语音识别功能未启用",
"no_api_key": "未设置API密钥",
"browser_not_support": "浏览器不支持语音识别",
"start_failed": "开始录音失败",
"transcribe_failed": "语音识别失败"
}
},
"voice_call": {
"tab_title": "通话功能",
"enable": "启用语音通话",
"enable.help": "启用后可以使用语音通话功能与AI进行对话",
"model": "通话模型",
"model.select": "选择模型",
"model.current": "当前模型: {{model}}",
"model.info": "选择用于语音通话的AI模型不同模型可能有不同的语音交互体验",
"welcome_message": "您好我是您的AI助手请长按说话按钮进行对话。",
"prompt": {
"label": "语音通话提示词",
"placeholder": "请输入语音通话提示词",
"save": "保存",
"reset": "重置",
"saved": "提示词已保存",
"reset_done": "提示词已重置",
"info": "此提示词将指导AI在语音通话模式下的回复方式"
},
"asr_tts_info": "语音通话使用上面的语音识别(ASR)和语音合成(TTS)设置",
"test": "测试通话",
"test_info": "请使用输入框右侧的语音通话按钮进行测试"
}
},
"translate": {

View File

@ -104,6 +104,13 @@
"default.description": "你好,我是預設助手。你可以立即開始與我聊天。",
"default.name": "預設助手",
"default.topic.name": "預設話題",
"tts": {
"play": "播放語音",
"stop": "停止播放",
"speak": "播放語音",
"stop_global": "停止所有語音播放",
"stopped": "已停止語音播放"
},
"history": {
"assistant_node": "助手",
"click_to_navigate": "點擊跳轉到對應訊息",
@ -1388,6 +1395,7 @@
"title": "隱私設定",
"enable_privacy_mode": "匿名發送錯誤報告和資料統計"
},
"memory": {
"title": "記憶功能",
"description": "管理AI助手的長期記憶自動分析對話並提取重要信息",
@ -1445,6 +1453,172 @@
"totalAnalyses": "總分析次數",
"successRate": "成功率",
"avgAnalysisTime": "平均分析時間"
},
"tts": {
"title": "語音設定",
"enable": "啟用語音合成",
"enable.help": "啟用後可以將文字轉換為語音",
"reset": "重置",
"reset_title": "重置自定義音色和模型",
"reset_confirm": "確定要重置所有自定義音色和模型嗎?這將刪除所有已添加的自定義項。",
"reset_success": "重置成功",
"reset_help": "如果音色或模型顯示異常,可以嘗試重置所有自定義項",
"api_settings": "API設定",
"service_type": "服務類型",
"service_type.openai": "OpenAI",
"service_type.edge": "Edge TTS",
"test": "測試",
"error": {
"not_enabled": "語音合成未啟用",
"no_edge_voice": "未選擇Edge TTS音色",
"no_api_key": "未設定API金鑰",
"browser_not_support": "瀏覽器不支援語音合成",
"no_voice": "未選擇音色",
"no_model": "未選擇模型",
"synthesis_failed": "語音合成失敗",
"play_failed": "語音播放失敗",
"empty_text": "文本為空",
"general": "語音合成出現錯誤",
"unsupported_service_type": "不支援的服務類型: {{serviceType}}"
},
"help": "使用OpenAI的TTS API需要API金鑰。Edge TTS使用瀏覽器功能不需要API金鑰。",
"learn_more": "了解更多",
"tab_title": "語音合成",
"service_type.refresh": "刷新TTS服務類型設置",
"service_type.refreshed": "已刷新TTS服務類型設置",
"api_key": "API金鑰",
"api_key.placeholder": "請輸入OpenAI API金鑰",
"api_url": "API位址",
"api_url.placeholder": "例如https://api.openai.com/v1/audio/speech",
"edge_voice": "瀏覽器 TTS音色",
"edge_voice.loading": "載入中...",
"edge_voice.refresh": "刷新可用音色列表",
"edge_voice.not_found": "未找到符合的音色",
"voice": "音色",
"voice.placeholder": "請選擇音色",
"voice_input_placeholder": "輸入音色",
"voice_add": "新增",
"voice_empty": "暫無自訂音色,請在下方新增",
"model": "模型",
"model.placeholder": "請選擇模型",
"model_input_placeholder": "輸入模型",
"model_add": "新增",
"model_empty": "暫無自訂模型,請在下方新增",
"filter_options": "篩選選項",
"filter.thinking_process": "篩選思考過程",
"filter.markdown": "篩選Markdown標記",
"filter.code_blocks": "篩選程式碼區塊",
"filter.html_tags": "篩選HTML標籤",
"max_text_length": "最大文字長度",
"service_type.siliconflow": "矽基流動",
"service_type.mstts": "免費線上 TTS",
"siliconflow_api_key": "矽基流動API金鑰",
"siliconflow_api_key.placeholder": "請輸入矽基流動API金鑰",
"siliconflow_api_url": "矽基流動API位址",
"siliconflow_api_url.placeholder": "例如https://api.siliconflow.cn/v1/audio/speech",
"siliconflow_voice": "矽基流動音色",
"siliconflow_voice.placeholder": "請選擇音色",
"siliconflow_model": "矽基流動模型",
"siliconflow_model.placeholder": "請選擇模型",
"siliconflow_response_format": "回應格式",
"siliconflow_response_format.placeholder": "預設為mp3",
"siliconflow_speed": "語速",
"siliconflow_speed.placeholder": "預設為1.0",
"edge_voice.available_count": "可用語音: {{count}}個",
"edge_voice.refreshing": "正在刷新語音列表...",
"edge_voice.refreshed": "語音列表已刷新",
"mstts.voice": "免費線上 TTS音色",
"mstts.output_format": "輸出格式",
"mstts.info": "免費線上TTS服務不需要API金鑰完全免費使用。",
"error.no_mstts_voice": "未設定免費線上 TTS音色",
"play": "播放語音",
"stop": "停止播放",
"speak": "播放語音",
"stop_global": "停止所有語音播放",
"stopped": "已停止語音播放",
"segmented": "分段",
"segmented_play": "分段播放",
"segmented_playback": "分段播放",
"filter.emojis": "篩選表情符號",
"show_progress_bar": "顯示TTS進度條"
},
"voice": {
"title": "語音功能",
"help": "語音功能包括文字轉語音(TTS)和語音辨識(ASR)。",
"learn_more": "了解更多"
},
"asr": {
"title": "語音辨識",
"tab_title": "語音辨識",
"enable": "啟用語音辨識",
"enable.help": "啟用後可以將語音轉換為文本",
"service_type": "服務類型",
"service_type.browser": "瀏覽器",
"service_type.local": "本地伺服器",
"api_key": "API金鑰",
"api_key.placeholder": "請輸入OpenAI API金鑰",
"api_url": "API位址",
"api_url.placeholder": "例如https://api.openai.com/v1/audio/transcriptions",
"model": "模型",
"browser.info": "使用瀏覽器內建的語音辨識功能,無需額外設定",
"local.info": "使用本地伺服器和瀏覽器進行語音辨識,需要先啟動伺服器並開啟瀏覽器頁面",
"local.browser_tip": "請在瀏覽器中開啟此頁面,並保持瀏覽器視窗開啟",
"local.test_connection": "測試連接",
"local.connection_success": "連接成功",
"local.connection_failed": "連接失敗,請確保伺服器已啟動",
"server.start": "啟動伺服器",
"server.stop": "停止伺服器",
"server.starting": "正在啟動伺服器...",
"server.started": "伺服器已啟動",
"server.stopping": "正在停止伺服器...",
"server.stopped": "伺服器已停止",
"server.already_running": "伺服器已經在執行中",
"server.not_running": "伺服器未執行",
"server.start_failed": "啟動伺服器失敗",
"server.stop_failed": "停止伺服器失敗",
"open_browser": "開啟瀏覽器頁面",
"test": "測試語音辨識",
"test_info": "請在輸入框中使用語音辨識按鈕進行測試",
"start": "開始錄音",
"stop": "停止錄音",
"preparing": "準備中",
"recording": "正在錄音...",
"processing": "正在處理語音...",
"success": "語音辨識成功",
"completed": "語音辨識完成",
"canceled": "已取消錄音",
"error": {
"not_enabled": "語音辨識功能未啟用",
"no_api_key": "未設定API金鑰",
"browser_not_support": "瀏覽器不支援語音辨識",
"start_failed": "開始錄音失敗",
"transcribe_failed": "語音辨識失敗"
},
"auto_start_server": "啟動應用程式自動開啟伺服器",
"auto_start_server.help": "啟用後,應用程式啟動時會自動開啟語音辨識伺服器"
},
"voice_call": {
"tab_title": "通話功能",
"enable": "啟用語音通話",
"enable.help": "啟用後可以使用語音通話功能與AI進行對話",
"model": "通話模型",
"model.select": "選擇模型",
"model.current": "目前模型: {{model}}",
"model.info": "選擇用於語音通話的AI模型不同模型可能有不同的語音互動體驗",
"prompt": {
"label": "語音通話提示詞",
"placeholder": "請輸入語音通話提示詞",
"save": "保存",
"reset": "重置",
"saved": "提示詞已保存",
"reset_done": "提示詞已重置",
"info": "此提示詞將指導AI在語音通話模式下的回覆方式"
},
"asr_tts_info": "語音通話使用上面的語音識別(ASR)和語音合成(TTS)設置",
"test": "測試通話",
"test_info": "請使用輸入框右側的語音通話按鈕進行測試",
"welcome_message": "您好我是您的AI助理請長按說話按鈕進行對話。"
}
},
"translate": {
@ -1485,6 +1659,28 @@
"quit": "結束",
"show_window": "顯示視窗",
"visualization": "視覺化"
},
"voice_call": {
"title": "語音通話",
"start": "開始語音通話",
"end": "結束通話",
"mute": "靜音",
"unmute": "取消靜音",
"pause": "暫停",
"resume": "繼續",
"you": "您",
"ai": "AI",
"press_to_talk": "長按說話",
"release_to_send": "放開傳送",
"initialization_failed": "初始化語音通話失敗",
"error": "語音通話出錯",
"initializing": "正在初始化語音通話...",
"ready": "語音通話已就緒",
"shortcut_key_setting": "語音辨識快速鍵設定",
"press_any_key": "請按任意鍵...",
"save": "儲存",
"cancel": "取消",
"shortcut_key_tip": "按下此快速鍵開始錄音,放開快速鍵結束錄音並傳送"
}
}
}

View File

@ -1,7 +1,18 @@
import { HolderOutlined } from '@ant-design/icons'
import {
CodeOutlined as _CodeOutlined,
FileSearchOutlined as _FileSearchOutlined,
HolderOutlined,
PaperClipOutlined as _PaperClipOutlined,
PauseCircleOutlined as _PauseCircleOutlined,
ThunderboltOutlined as _ThunderboltOutlined,
TranslationOutlined as _TranslationOutlined
} from '@ant-design/icons'
import ASRButton from '@renderer/components/ASRButton'
import { QuickPanelListItem, QuickPanelView, useQuickPanel } from '@renderer/components/QuickPanel'
import TranslateButton from '@renderer/components/TranslateButton'
import VoiceCallButton from '@renderer/components/VoiceCallButton'
import { isGenerateImageModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
import { getDefaultVoiceCallPrompt } from '@renderer/config/prompts'
import db from '@renderer/databases'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useKnowledgeBases } from '@renderer/hooks/useKnowledge'
@ -19,7 +30,7 @@ import { getModelUniqId } from '@renderer/services/ModelService'
import { estimateMessageUsage, estimateTextTokens as estimateTxtTokens } from '@renderer/services/TokenService'
import { translateText } from '@renderer/services/TranslateService'
import WebSearchService from '@renderer/services/WebSearchService'
import { useAppDispatch } from '@renderer/store'
import store, { useAppDispatch } from '@renderer/store'
import { sendMessage as _sendMessage } from '@renderer/store/messages'
import { setSearching } from '@renderer/store/runtime'
import { Assistant, FileType, KnowledgeBase, KnowledgeItem, MCPServer, Message, Model, Topic } from '@renderer/types'
@ -77,6 +88,8 @@ let _files: FileType[] = []
const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) => {
const [text, setText] = useState(_text)
// 用于存储语音识别的中间结果,不直接显示在输入框中
const [, setAsrCurrentText] = useState('')
const [inputFocus, setInputFocus] = useState(false)
const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(_assistant.id)
const {
@ -525,7 +538,7 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
}, [files.length, model, openSelectFileMenu, t, text, translate])
const handleKeyDown = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
const isEnterPressed = event.keyCode == 13
const isEnterPressed = event.key === 'Enter'
// 检查是否是消息ID格式
if (isEnterPressed && !event.shiftKey) {
@ -905,10 +918,139 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
return newText
})
textareaRef.current?.focus()
})
}),
// 监听语音通话消息
EventEmitter.on(
EVENT_NAMES.VOICE_CALL_MESSAGE,
(data: {
text: string
model: any
isVoiceCall?: boolean
useVoiceCallModel?: boolean
voiceCallModelId?: string
}) => {
console.log('收到语音通话消息:', data)
// 先设置输入框文本
setText(data.text)
// 使用延时确保文本已经设置到输入框
setTimeout(() => {
// 直接调用发送消息函数而不检查inputEmpty
console.log('准备自动发送语音识别消息:', data.text)
// 直接使用正确的方式发送消息
// 创建用户消息
const userMessage = getUserMessage({
assistant,
topic,
type: 'text',
content: data.text
})
// 如果是语音通话消息,使用语音通话专用模型
if (data.isVoiceCall || data.useVoiceCallModel) {
// 从全局设置中获取语音通话专用模型
const { voiceCallModel } = store.getState().settings
// 打印调试信息
console.log('语音通话消息,尝试使用语音通话专用模型')
console.log('全局设置中的语音通话模型:', voiceCallModel ? JSON.stringify(voiceCallModel) : 'null')
console.log('事件中传递的模型:', data.model ? JSON.stringify(data.model) : 'null')
// 如果全局设置中有语音通话专用模型,优先使用
if (voiceCallModel) {
userMessage.model = voiceCallModel
console.log('使用全局设置中的语音通话专用模型:', voiceCallModel.name)
// 强制覆盖消息中的模型
userMessage.modelId = voiceCallModel.id
}
// 如果没有全局设置,但事件中传递了模型,使用事件中的模型
else if (data.model && typeof data.model === 'object') {
userMessage.model = data.model
console.log('使用事件中传递的模型:', data.model.name || data.model.id)
// 强制覆盖消息中的模型
userMessage.modelId = data.model.id
}
// 如果没有模型对象但有模型ID尝试使用模型ID
else if (data.voiceCallModelId) {
console.log('使用事件中传递的模型ID:', data.voiceCallModelId)
userMessage.modelId = data.voiceCallModelId
}
// 如果以上都没有,使用当前助手模型
else {
console.log('没有找到语音通话专用模型,使用当前助手模型')
}
}
// 非语音通话消息,使用当前助手模型
else if (data.model) {
const modelObj = assistant.model?.id === data.model.id ? assistant.model : undefined
if (modelObj) {
userMessage.model = modelObj
console.log('使用当前助手模型:', modelObj.name || modelObj.id)
}
}
// 如果是语音通话消息,创建一个新的助手对象,并设置模型和提示词
let assistantToUse = assistant
if (data.isVoiceCall || data.useVoiceCallModel) {
// 创建一个新的助手对象,以避免修改原始助手
assistantToUse = { ...assistant }
// 如果有语音通话专用模型,设置助手的模型
if (userMessage.model) {
assistantToUse.model = userMessage.model
console.log(
'为语音通话消息创建了新的助手对象,并设置了模型:',
userMessage.model.name || userMessage.model.id
)
}
// 获取用户自定义提示词
const { voiceCallPrompt } = store.getState().settings
// 使用自定义提示词或当前语言的默认提示词
const promptToUse = voiceCallPrompt || getDefaultVoiceCallPrompt()
// 如果助手已经有提示词,则在其后添加语音通话专属提示词
if (assistantToUse.prompt) {
assistantToUse.prompt += '\n\n' + promptToUse
} else {
assistantToUse.prompt = promptToUse
}
console.log('为语音通话消息添加了专属提示词')
}
// 分发发送消息的action
dispatch(_sendMessage(userMessage, assistantToUse, topic, {}))
// 清空输入框
setText('')
// 重置语音识别状态
setAsrCurrentText('')
console.log('已触发发送消息事件')
}, 300)
}
)
]
return () => unsubscribes.forEach((unsub) => unsub())
}, [addNewTopic, resizeTextArea])
}, [
addNewTopic,
resizeTextArea,
sendMessage,
model,
inputEmpty,
loading,
dispatch,
assistant,
topic,
setText
// getUserMessage 和 _sendMessage 是外部作用域值,不需要作为依赖项
])
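
Only the listener side of VOICE_CALL_MESSAGE appears in this diff. For reference, the producer is presumably a single EventEmitter.emit call from the voice-call window, along these lines (the payload fields mirror the ones destructured above; `finalTranscript` is a hypothetical variable):

// Hypothetical emitter, e.g. after ASR returns a final transcript in the voice-call window.
EventEmitter.emit(EVENT_NAMES.VOICE_CALL_MESSAGE, {
  text: finalTranscript,
  model: null, // the handler above falls back to settings.voiceCallModel when null
  isVoiceCall: true
})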
useEffect(() => {
textareaRef.current?.focus()
@ -1198,6 +1340,32 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
</ToolbarMenu>
<ToolbarMenu>
<TranslateButton text={text} onTranslated={onTranslated} isLoading={isTranslating} />
<ASRButton
onTranscribed={(transcribedText, isFinal) => {
// 如果是空字符串,不做任何处理
if (!transcribedText) return
if (isFinal) {
// 最终结果,添加到输入框中
setText((prevText) => {
// 如果当前输入框为空,直接设置为识别的文本
if (!prevText.trim()) {
return transcribedText
}
// 否则,添加识别的文本到输入框中,用空格分隔
return prevText + ' ' + transcribedText
})
// 清除当前识别的文本
setAsrCurrentText('')
} else {
// 中间结果,保存到状态变量中,但不更新输入框
setAsrCurrentText(transcribedText)
}
}}
/>
<VoiceCallButton disabled={loading} />
{loading && (
<Tooltip placement="top" title={t('chat.input.pause')} arrow>
<ToolbarButton type="text" onClick={onPause} style={{ marginRight: -2, marginTop: 1 }}>

View File

@ -1,16 +1,22 @@
import TTSProgressBar from '@renderer/components/TTSProgressBar'
import { FONT_FAMILY } from '@renderer/config/constant'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useModel } from '@renderer/hooks/useModel'
import { useRuntime } from '@renderer/hooks/useRuntime'
import { useMessageStyle, useSettings } from '@renderer/hooks/useSettings'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import { getMessageModelId } from '@renderer/services/MessagesService'
import { getModelUniqId } from '@renderer/services/ModelService'
import TTSService from '@renderer/services/TTSService'
import { RootState, useAppDispatch } from '@renderer/store'
import { setLastPlayedMessageId, setSkipNextAutoTTS } from '@renderer/store/settings'
import { Assistant, Message, Topic } from '@renderer/types'
import { classNames } from '@renderer/utils'
import { Divider, Dropdown } from 'antd'
import { ItemType } from 'antd/es/menu/interface'
import { Dispatch, FC, memo, SetStateAction, useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
import styled from 'styled-components'
import MessageContent from './MessageContent'
@ -47,10 +53,17 @@ const MessageItem: FC<Props> = ({
const model = useModel(getMessageModelId(message), message.model?.provider) || message.model
const { isBubbleStyle } = useMessageStyle()
const { showMessageDivider, messageFont, fontSize } = useSettings()
const { generating } = useRuntime()
const messageContainerRef = useRef<HTMLDivElement>(null)
// const topic = useTopic(assistant, _topic?.id)
const [contextMenuPosition, setContextMenuPosition] = useState<{ x: number; y: number } | null>(null)
const [selectedQuoteText, setSelectedQuoteText] = useState<string>('')
// 获取TTS设置
const { ttsEnabled, isVoiceCallActive, lastPlayedMessageId, skipNextAutoTTS } = useSelector(
(state: RootState) => state.settings
)
const dispatch = useAppDispatch()
const [selectedText, setSelectedText] = useState<string>('')
const isLastMessage = index === 0
@ -95,6 +108,123 @@ const MessageItem: FC<Props> = ({
}
}, [])
// 使用 ref 跟踪消息状态变化
const prevGeneratingRef = useRef(generating)
// 更新 prevGeneratingRef 的值
useEffect(() => {
// 在每次渲染后更新 ref 值
prevGeneratingRef.current = generating
}, [generating])
// 监听新消息生成,并在新消息生成时重置 skipNextAutoTTS
useEffect(() => {
// 如果从生成中变为非生成中,说明新消息刚刚生成完成
if (
prevGeneratingRef.current &&
!generating &&
isLastMessage &&
isAssistantMessage &&
message.status === 'success'
) {
console.log('新消息生成完成消息ID:', message.id)
// 当新消息生成完成时,始终重置 skipNextAutoTTS 为 false
// 这样确保新生成的消息可以自动播放
console.log('新消息生成完成,重置 skipNextAutoTTS 为 false')
dispatch(setSkipNextAutoTTS(false))
}
}, [isLastMessage, isAssistantMessage, message.status, message.id, generating, dispatch, prevGeneratingRef])
// 当消息内容变化时,重置 skipNextAutoTTS
useEffect(() => {
// 如果是最后一条助手消息,且消息状态为成功,且消息内容不为空
if (
isLastMessage &&
isAssistantMessage &&
message.status === 'success' &&
message.content &&
message.content.trim()
) {
// 如果是新生成的消息,重置 skipNextAutoTTS 为 false
if (message.id !== lastPlayedMessageId) {
console.log(
'检测到新消息,重置 skipNextAutoTTS 为 false消息ID:',
message.id,
'消息内容前20个字符:',
message.content?.substring(0, 20)
)
dispatch(setSkipNextAutoTTS(false))
}
}
}, [isLastMessage, isAssistantMessage, message.status, message.content, message.id, lastPlayedMessageId, dispatch])
// 自动播放TTS的逻辑
useEffect(() => {
// 如果是最后一条助手消息且消息状态为成功且不是正在生成中且TTS已启用
// 注意只有在语音通话窗口打开时才自动播放TTS
if (isLastMessage && isAssistantMessage && message.status === 'success' && !generating && ttsEnabled) {
// 如果语音通话窗口没有打开则不自动播放TTS
if (!isVoiceCallActive) {
console.log('不自动播放TTS因为语音通话窗口没有打开:', isVoiceCallActive)
return
}
// 检查是否需要跳过自动TTS
if (skipNextAutoTTS) {
console.log(
'跳过自动TTS因为 skipNextAutoTTS 为 true消息ID:',
message.id,
'消息内容前20个字符:',
message.content?.substring(0, 20),
'消息状态:',
message.status,
'是否最后一条消息:',
isLastMessage,
'是否助手消息:',
isAssistantMessage,
'是否正在生成中:',
generating,
'语音通话窗口状态:',
isVoiceCallActive
)
// 注意:不在这里重置 skipNextAutoTTS而是在新消息生成时重置
return
}
console.log(
'准备自动播放TTS因为 skipNextAutoTTS 为 false消息ID:',
message.id,
'消息内容前20个字符:',
message.content?.substring(0, 20)
)
// 检查消息是否有内容,且消息是新的(不是上次播放过的消息)
if (message.content && message.content.trim() && message.id !== lastPlayedMessageId) {
console.log('自动播放最新助手消息的TTS:', message.id, '语音通话窗口状态:', isVoiceCallActive)
// 更新最后播放的消息ID
dispatch(setLastPlayedMessageId(message.id))
// 使用延时确保消息已完全加载
setTimeout(() => {
TTSService.speakFromMessage(message)
}, 500)
} else if (message.id === lastPlayedMessageId) {
console.log('不自动播放TTS因为该消息已经播放过:', message.id)
}
}
}, [
isLastMessage,
isAssistantMessage,
message,
generating,
ttsEnabled,
isVoiceCallActive,
lastPlayedMessageId,
skipNextAutoTTS,
dispatch
])
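
The two reducers dispatched in these effects are defined elsewhere in the settings slice. A minimal stand-in sketch of what they presumably look like (names and payload types inferred from the dispatch sites above; the real slice holds many more fields):

import { createSlice, PayloadAction } from '@reduxjs/toolkit'

const ttsAutoplaySlice = createSlice({
  name: 'settings',
  initialState: { skipNextAutoTTS: false, lastPlayedMessageId: null as string | null },
  reducers: {
    // Cleared when a new assistant message finishes generating, so it can autoplay.
    setSkipNextAutoTTS: (state, action: PayloadAction<boolean>) => {
      state.skipNextAutoTTS = action.payload
    },
    // Remembers the last autoplayed message so it is not spoken twice.
    setLastPlayedMessageId: (state, action: PayloadAction<string | null>) => {
      state.lastPlayedMessageId = action.payload
    }
  }
})
export const { setSkipNextAutoTTS, setLastPlayedMessageId } = ttsAutoplaySlice.actions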
const messageHighlightHandler = useCallback((highlight: boolean = true) => {
if (messageContainerRef.current) {
messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
@ -154,6 +284,11 @@ const MessageItem: FC<Props> = ({
<MessageErrorBoundary>
<MessageContent message={message} model={model} />
</MessageErrorBoundary>
{isAssistantMessage && (
<ProgressBarWrapper>
<TTSProgressBar messageId={message.id} />
</ProgressBarWrapper>
)}
{showMenubar && (
<MessageFooter
style={{
@ -192,6 +327,7 @@ const getContextMenuItems = (
t: (key: string) => string,
selectedQuoteText: string,
selectedText: string,
message: Message
): ItemType[] => {
const items: ItemType[] = []
@ -214,6 +350,47 @@ const getContextMenuItems = (
EventEmitter.emit(EVENT_NAMES.QUOTE_TEXT, selectedQuoteText)
}
})
// 从选中的文本开始朗读后面的内容;找不到精确位置时只朗读选中的文本
items.push({
key: 'speak',
label: '朗读',
onClick: () => {
if (selectedText && message.content) {
const startIndex = message.content.indexOf(selectedText)
const textToSpeak = startIndex !== -1 ? message.content.substring(startIndex) : selectedText
import('@renderer/services/TTSService').then(({ default: TTSService }) => {
TTSService.speak(textToSpeak)
})
}
}
})
}
// 添加复制消息ID选项但不显示ID
@ -282,4 +459,9 @@ const NewContextMessage = styled.div`
cursor: pointer;
`
const ProgressBarWrapper = styled.div`
width: 100%;
padding: 0 10px;
`
export default memo(MessageItem)

View File

@ -1,4 +1,5 @@
import { SyncOutlined, TranslationOutlined } from '@ant-design/icons'
import TTSHighlightedText from '@renderer/components/TTSHighlightedText'
import { isOpenAIWebSearch } from '@renderer/config/models'
import { getModelUniqId } from '@renderer/services/ModelService'
import { Message, Model } from '@renderer/types'
@ -7,7 +8,7 @@ import { withMessageThought } from '@renderer/utils/formats'
import { Collapse, Divider, Flex } from 'antd'
import { clone } from 'lodash'
import { Search } from 'lucide-react'
import React, { Fragment, useMemo } from 'react'
import React, { Fragment, useEffect, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import BarLoader from 'react-spinners/BarLoader'
import BeatLoader from 'react-spinners/BeatLoader'
@ -30,6 +31,23 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
const { t } = useTranslation()
const message = withMessageThought(clone(_message))
const isWebCitation = model && (isOpenAIWebSearch(model) || model.provider === 'openrouter')
const [isSegmentedPlayback, setIsSegmentedPlayback] = useState(false)
// 监听分段播放状态变化
useEffect(() => {
const handleSegmentedPlaybackUpdate = (event: CustomEvent) => {
const { isSegmentedPlayback } = event.detail
setIsSegmentedPlayback(isSegmentedPlayback)
}
// 添加事件监听器
window.addEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
// 组件卸载时移除事件监听器
return () => {
window.removeEventListener('tts-segmented-playback-update', handleSegmentedPlaybackUpdate as EventListener)
}
}, [])
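
Only the listener is part of this diff; the dispatch side in TTSService is presumably a window-level CustomEvent mirroring the detail shape read above (an assumption):

// Presumed emitter in TTSService when segmented playback is toggled:
window.dispatchEvent(
  new CustomEvent('tts-segmented-playback-update', {
    detail: { isSegmentedPlayback: true } // false again when segmented playback ends
  })
)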
// HTML实体编码辅助函数
const encodeHTML = (str: string) => {
@ -204,6 +222,7 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
<Flex gap="8px" wrap style={{ marginBottom: 10 }}>
{message.mentions?.map((model) => <MentionTag key={getModelUniqId(model)}>{'@' + model.name}</MentionTag>)}
</Flex>
{message.referencedMessages && message.referencedMessages.length > 0 && (
<div>
{message.referencedMessages.map((refMsg, index) => (
@ -299,6 +318,15 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
<MessageTools message={message} />
</div>
{isSegmentedPlayback ? (
<TTSHighlightedText text={processedContent.replace(toolUseRegex, '')} />
) : (
<Markdown message={{ ...message, content: processedContent.replace(toolUseRegex, '') }} />
)}
{message.metadata?.generateImage && <MessageImage message={message} />}
{message.translatedContent && (
<Fragment>

View File

@ -2,6 +2,7 @@ import { CheckOutlined, EditOutlined, QuestionCircleOutlined, SyncOutlined } fro
import ObsidianExportPopup from '@renderer/components/Popups/ObsidianExportPopup'
import SelectModelPopup from '@renderer/components/Popups/SelectModelPopup'
import TextEditPopup from '@renderer/components/Popups/TextEditPopup'
import TTSButton from '@renderer/components/TTSButton'
import { isReasoningModel } from '@renderer/config/models'
import { TranslateLanguageOptions } from '@renderer/config/translate'
import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
@ -72,6 +73,9 @@ const MessageMenubar: FC<Props> = (props) => {
const exportMenuOptions = useSelector((state: RootState) => state.settings.exportMenuOptions)
// 获取TTS设置
const ttsEnabled = useSelector((state: RootState) => state.settings.ttsEnabled)
const onCopy = useCallback(
(e: React.MouseEvent) => {
e.stopPropagation()
@ -146,7 +150,7 @@ const MessageMenubar: FC<Props> = (props) => {
// 解析编辑后的文本,提取图片 URL
const imageRegex = /!\[image-\d+\]\((.*?)\)/g
const imageUrls: string[] = []
let match
let match: RegExpExecArray | null
let content = editedText
while ((match = imageRegex.exec(editedText)) !== null) {
@ -215,6 +219,8 @@ const MessageMenubar: FC<Props> = (props) => {
[isTranslating, message, editMessage, setStreamMessage, commitStreamMessage, clearStreamMessage, t]
)
// TTS功能已移至TTSButton组件
const dropdownItems = useMemo(
() => [
{
@ -407,6 +413,7 @@ const MessageMenubar: FC<Props> = (props) => {
</ActionButton>
</Tooltip>
)}
{isAssistantMessage && ttsEnabled && <TTSButton message={message} className="message-action-button" />}
{!isUserMessage && (
<Dropdown
menu={{

View File

@ -30,6 +30,7 @@ import MessageAnchorLine from './MessageAnchorLine'
import MessageGroup from './MessageGroup'
import NarrowLayout from './NarrowLayout'
import Prompt from './Prompt'
import TTSStopButton from './TTSStopButton'
interface MessagesProps {
assistant: Assistant
@ -249,6 +250,7 @@ const Messages: React.FC<MessagesProps> = ({ assistant, topic, setActiveTopic })
</NarrowLayout>
{messageNavigation === 'anchor' && <MessageAnchorLine messages={displayMessages} />}
{messageNavigation === 'buttons' && <ChatNavigation containerId="messages" />}
<TTSStopButton />
</Container>
)
}

View File

@ -0,0 +1,88 @@
import { SoundOutlined } from '@ant-design/icons'
import TTSService from '@renderer/services/TTSService'
import { Tooltip } from 'antd'
import { useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
const TTSStopButton: React.FC = () => {
const { t } = useTranslation()
const [isVisible, setIsVisible] = useState(false)
// 添加TTS状态变化事件监听器
useEffect(() => {
const handleTTSStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
console.log('全局TTS停止按钮检测到TTS状态变化:', isPlaying)
setIsVisible(isPlaying)
}
// 添加事件监听器
window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
// 初始检查当前状态
const isCurrentlyPlaying = TTSService.isCurrentlyPlaying()
setIsVisible(isCurrentlyPlaying)
// 组件卸载时移除事件监听器
return () => {
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}
}, [])
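
As with the segmented-playback event, only the listener appears in this diff; TTSService presumably emits the state change like this (an assumption, with the detail shape taken from the handler above):

window.dispatchEvent(new CustomEvent('tts-state-change', { detail: { isPlaying: true } }))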
// 停止TTS播放
const handleStopTTS = useCallback(async () => {
console.log('点击全局停止TTS按钮')
// 强制停止所有TTS播放
TTSService.stop()
// 不需要手动设置状态,事件监听器会处理
// 显示停止消息
window.message.success({ content: t('chat.tts.stopped', { defaultValue: '已停止语音播放' }), key: 'tts-stopped' })
}, [t])
if (!isVisible) return null
return (
<StopButtonContainer>
<Tooltip title={t('chat.tts.stop_global')}>
<ActionButton onClick={handleStopTTS}>
<SoundOutlined />
</ActionButton>
</Tooltip>
</StopButtonContainer>
)
}
const StopButtonContainer = styled.div`
position: fixed;
bottom: 150px; /* 从100px改为150px向上移动50px */
right: 20px;
z-index: 1000;
`
const ActionButton = styled.div`
cursor: pointer;
border-radius: 8px;
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
width: 30px;
height: 30px;
transition: all 0.2s ease;
background-color: var(--color-primary);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
&:hover {
background-color: var(--color-primary-soft);
}
.anticon {
cursor: pointer;
font-size: 14px;
color: var(--color-white);
}
`
export default TTSStopButton

View File

@ -22,6 +22,7 @@ import {
HardDrive,
Info,
LayoutGrid,
Mic,
MonitorCog,
Package,
Rocket,
@ -47,6 +48,7 @@ import ProvidersList from './ProviderSettings'
import QuickAssistantSettings from './QuickAssistantSettings'
import QuickPhraseSettings from './QuickPhraseSettings'
import ShortcutSettings from './ShortcutSettings'
import TTSSettings from './TTSSettings/TTSSettings'
import WebSearchSettings from './WebSearchSettings'
const SettingsPage: FC = () => {
@ -139,6 +141,12 @@ const SettingsPage: FC = () => {
{t('settings.data.title')}
</MenuItem>
</MenuItemLink>
<MenuItemLink to="/settings/tts">
<MenuItem className={isRoute('/settings/tts')}>
<Mic size={18} />
{t('settings.voice.title')}
</MenuItem>
</MenuItemLink>
<MenuItemLink to="/settings/about">
<MenuItem className={isRoute('/settings/about')}>
<Info size={18} />
@ -152,13 +160,18 @@ const SettingsPage: FC = () => {
<Route path="model" element={<ModelSettings />} />
<Route path="web-search" element={<WebSearchSettings />} />
<Route path="mcp/*" element={<MCPSettings />} />
<Route path="memory" element={<MemorySettings />} />
<Route path="general/*" element={<GeneralSettings />} />
<Route path="display" element={<DisplaySettings />} />
{showMiniAppSettings && <Route path="miniapps" element={<MiniAppSettings />} />}
<Route path="shortcut" element={<ShortcutSettings />} />
<Route path="quickAssistant" element={<QuickAssistantSettings />} />
<Route path="data" element={<DataSettings />} />
<Route path="data/*" element={<DataSettings />} />
<Route path="tts" element={<TTSSettings />} />
<Route path="about" element={<AboutSettings />} />
<Route path="quickPhrase" element={<QuickPhraseSettings />} />
</Routes>

View File

@ -0,0 +1,316 @@
import { GlobalOutlined, InfoCircleOutlined, PlayCircleOutlined, StopOutlined } from '@ant-design/icons'
import ASRServerService from '@renderer/services/ASRServerService'
import ASRService from '@renderer/services/ASRService'
import { useAppDispatch } from '@renderer/store'
import {
setAsrApiKey,
setAsrApiUrl,
setAsrAutoStartServer,
setAsrEnabled,
setAsrLanguage,
setAsrModel,
setAsrServiceType
} from '@renderer/store/settings'
import { Button, Form, Input, Select, Space, Switch } from 'antd'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
import styled from 'styled-components'
const ASRSettings: FC = () => {
const { t } = useTranslation()
const dispatch = useAppDispatch()
// 服务器状态
const [isServerRunning, setIsServerRunning] = useState(false)
// 从 Redux 获取 ASR 设置
const asrEnabled = useSelector((state: any) => state.settings.asrEnabled)
const asrServiceType = useSelector((state: any) => state.settings.asrServiceType || 'openai')
const asrApiKey = useSelector((state: any) => state.settings.asrApiKey)
const asrApiUrl = useSelector((state: any) => state.settings.asrApiUrl)
const asrModel = useSelector((state: any) => state.settings.asrModel || 'whisper-1')
const asrAutoStartServer = useSelector((state: any) => state.settings.asrAutoStartServer)
const asrLanguage = useSelector((state: any) => state.settings.asrLanguage || 'zh-CN')
// 检查服务器状态
useEffect(() => {
if (asrServiceType === 'local') {
setIsServerRunning(ASRServerService.isRunning())
}
return undefined // 添加返回值以解决TS7030错误
}, [asrServiceType])
// 服务类型选项
const serviceTypeOptions = [
{ label: 'OpenAI', value: 'openai' },
{ label: t('settings.asr.service_type.browser'), value: 'browser' },
{ label: t('settings.asr.service_type.local'), value: 'local' }
]
// 模型选项
const modelOptions = [{ label: 'whisper-1', value: 'whisper-1' }]
// 语言选项
const languageOptions = [
{ label: '中文 (Chinese)', value: 'zh-CN' },
{ label: 'English', value: 'en-US' },
{ label: '日本語 (Japanese)', value: 'ja-JP' },
{ label: 'Русский (Russian)', value: 'ru-RU' },
{ label: 'Français (French)', value: 'fr-FR' },
{ label: 'Deutsch (German)', value: 'de-DE' },
{ label: 'Español (Spanish)', value: 'es-ES' },
{ label: 'Italiano (Italian)', value: 'it-IT' },
{ label: 'Português (Portuguese)', value: 'pt-PT' },
{ label: '한국어 (Korean)', value: 'ko-KR' }
]
return (
<Container>
<Form layout="vertical">
{/* ASR开关 */}
<Form.Item>
<Space>
<Switch checked={asrEnabled} onChange={(checked) => dispatch(setAsrEnabled(checked))} />
<span>{t('settings.asr.enable')}</span>
<Tooltip title={t('settings.asr.enable.help')}>
<InfoCircleOutlined style={{ color: 'var(--color-text-3)' }} />
</Tooltip>
</Space>
</Form.Item>
{/* 服务类型选择 */}
<Form.Item label={t('settings.asr.service_type')} style={{ marginBottom: 16 }}>
<Select
value={asrServiceType}
onChange={(value) => dispatch(setAsrServiceType(value))}
options={serviceTypeOptions}
disabled={!asrEnabled}
style={{ width: '100%' }}
/>
</Form.Item>
{/* OpenAI ASR设置 */}
{asrServiceType === 'openai' && (
<>
{/* API密钥 */}
<Form.Item label={t('settings.asr.api_key')} style={{ marginBottom: 16 }}>
<Input.Password
value={asrApiKey}
onChange={(e) => dispatch(setAsrApiKey(e.target.value))}
placeholder={t('settings.asr.api_key.placeholder')}
disabled={!asrEnabled}
/>
</Form.Item>
{/* API地址 */}
<Form.Item label={t('settings.asr.api_url')} style={{ marginBottom: 16 }}>
<Input
value={asrApiUrl}
onChange={(e) => dispatch(setAsrApiUrl(e.target.value))}
placeholder={t('settings.asr.api_url.placeholder')}
disabled={!asrEnabled}
/>
</Form.Item>
{/* 模型选择 */}
<Form.Item label={t('settings.asr.model')} style={{ marginBottom: 16 }}>
<Select
value={asrModel}
onChange={(value) => dispatch(setAsrModel(value))}
options={modelOptions}
disabled={!asrEnabled}
style={{ width: '100%' }}
/>
</Form.Item>
</>
)}
{/* 浏览器ASR设置 */}
{asrServiceType === 'browser' && (
<Form.Item>
<Alert type="info">{t('settings.asr.browser.info')}</Alert>
</Form.Item>
)}
{/* 本地服务器ASR设置 */}
{asrServiceType === 'local' && (
<>
<Form.Item>
<Alert type="info">{t('settings.asr.local.info')}</Alert>
</Form.Item>
<Form.Item>
<Space direction="vertical" style={{ width: '100%' }}>
<Space>
<Button
type="primary"
icon={<PlayCircleOutlined />}
onClick={async () => {
const success = await ASRServerService.startServer()
if (success) {
setIsServerRunning(true)
}
}}
disabled={!asrEnabled || isServerRunning}>
{t('settings.asr.server.start')}
</Button>
<Button
danger
icon={<StopOutlined />}
onClick={async () => {
const success = await ASRServerService.stopServer()
if (success) {
setIsServerRunning(false)
}
}}
disabled={!asrEnabled || !isServerRunning}>
{t('settings.asr.server.stop')}
</Button>
</Space>
<Button
type="primary"
icon={<GlobalOutlined />}
onClick={() => window.open('http://localhost:34515', '_blank')}
disabled={!asrEnabled || !isServerRunning}>
{t('settings.asr.open_browser')}
</Button>
<Button
onClick={() => {
// 尝试连接到WebSocket服务器
ASRService.connectToWebSocketServer?.()
.then((connected) => {
if (connected) {
window.message.success({
content: t('settings.asr.local.connection_success'),
key: 'ws-connect'
})
} else {
window.message.error({
content: t('settings.asr.local.connection_failed'),
key: 'ws-connect'
})
}
})
.catch((error) => {
console.error('Failed to connect to WebSocket server:', error)
window.message.error({ content: t('settings.asr.local.connection_failed'), key: 'ws-connect' })
})
}}
disabled={!asrEnabled || !isServerRunning}>
{t('settings.asr.local.test_connection')}
</Button>
<BrowserTip>{t('settings.asr.local.browser_tip')}</BrowserTip>
{/* 语言选择 */}
<Form.Item label={t('settings.asr.language', { defaultValue: '语言' })} style={{ marginTop: 16 }}>
<Select
value={asrLanguage}
onChange={(value) => dispatch(setAsrLanguage(value))}
options={languageOptions}
disabled={!asrEnabled}
style={{ width: '100%' }}
showSearch
optionFilterProp="label"
/>
</Form.Item>
{/* 启动应用自动开启服务器 */}
<Form.Item style={{ marginTop: 16 }}>
<Space>
<Switch
checked={asrAutoStartServer}
onChange={(checked) => dispatch(setAsrAutoStartServer(checked))}
disabled={!asrEnabled}
/>
<span>{t('settings.asr.auto_start_server')}</span>
<Tooltip title={t('settings.asr.auto_start_server.help')}>
<InfoCircleOutlined style={{ color: 'var(--color-text-3)' }} />
</Tooltip>
</Space>
</Form.Item>
</Space>
</Form.Item>
</>
)}
{/* 测试按钮 */}
<Form.Item>
<Space>
<Button
type="primary"
disabled={!asrEnabled}
onClick={() => window.message.info({ content: t('settings.asr.test_info'), key: 'asr-test' })}>
{t('settings.asr.test')}
</Button>
</Space>
</Form.Item>
</Form>
</Container>
)
}
const Container = styled.div`
padding: 0 0 20px 0;
`
const Tooltip = styled.div`
position: relative;
display: inline-block;
cursor: help;
&:hover::after {
content: attr(title);
position: absolute;
bottom: 100%;
left: 50%;
transform: translateX(-50%);
padding: 5px 10px;
background-color: var(--color-background-soft);
border: 1px solid var(--color-border);
border-radius: 4px;
white-space: nowrap;
z-index: 1;
font-size: 12px;
}
`
const Alert = styled.div<{ type: 'info' | 'warning' | 'error' | 'success' }>`
padding: 10px 15px;
border-radius: 4px;
margin-bottom: 16px;
background-color: ${(props) =>
props.type === 'info'
? 'var(--color-info-bg)'
: props.type === 'warning'
? 'var(--color-warning-bg)'
: props.type === 'error'
? 'var(--color-error-bg)'
: 'var(--color-success-bg)'};
border: 1px solid
${(props) =>
props.type === 'info'
? 'var(--color-info-border)'
: props.type === 'warning'
? 'var(--color-warning-border)'
: props.type === 'error'
? 'var(--color-error-border)'
: 'var(--color-success-border)'};
color: ${(props) =>
props.type === 'info'
? 'var(--color-info-text)'
: props.type === 'warning'
? 'var(--color-warning-text)'
: props.type === 'error'
? 'var(--color-error-text)'
: 'var(--color-success-text)'};
`
const BrowserTip = styled.div`
font-size: 12px;
color: var(--color-text-3);
margin-top: 8px;
`
export default ASRSettings

File diff suppressed because it is too large

View File

@ -0,0 +1,165 @@
import { InfoCircleOutlined, PhoneOutlined, ReloadOutlined } from '@ant-design/icons'
import SelectModelPopup from '@renderer/components/Popups/SelectModelPopup'
import { getModelLogo } from '@renderer/config/models'
import { DEFAULT_VOICE_CALL_PROMPT } from '@renderer/config/prompts'
import { useAppDispatch } from '@renderer/store'
import { setVoiceCallEnabled, setVoiceCallModel, setVoiceCallPrompt } from '@renderer/store/settings'
import { Button, Form, Input, Space, Switch, Tooltip as AntTooltip } from 'antd'
import { FC, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
import styled from 'styled-components'
const VoiceCallSettings: FC = () => {
const { t } = useTranslation()
const dispatch = useAppDispatch()
// 从 Redux 获取通话功能设置
const voiceCallEnabled = useSelector((state: any) => state.settings.voiceCallEnabled ?? true)
const voiceCallModel = useSelector((state: any) => state.settings.voiceCallModel)
const voiceCallPrompt = useSelector((state: any) => state.settings.voiceCallPrompt)
// 提示词编辑状态
const [promptText, setPromptText] = useState<string>(voiceCallPrompt || DEFAULT_VOICE_CALL_PROMPT)
// 模型选择状态
const [, setIsSelectingModel] = useState(false)
// 选择模型
const handleSelectModel = async () => {
setIsSelectingModel(true)
try {
const model = await SelectModelPopup.show({})
if (model) {
dispatch(setVoiceCallModel(model))
}
} catch (error) {
console.error('选择模型失败:', error)
} finally {
setIsSelectingModel(false)
}
}
// 保存提示词
const handleSavePrompt = () => {
dispatch(setVoiceCallPrompt(promptText))
window.message.success({ content: t('settings.voice_call.prompt.saved'), key: 'voice-call-prompt' })
}
// 重置提示词
const handleResetPrompt = () => {
setPromptText(DEFAULT_VOICE_CALL_PROMPT)
dispatch(setVoiceCallPrompt(null))
window.message.success({ content: t('settings.voice_call.prompt.reset_done'), key: 'voice-call-prompt' })
}
return (
<Container>
<Form layout="vertical">
{/* 通话功能开关 */}
<Form.Item>
<Space>
<Switch checked={voiceCallEnabled} onChange={(checked) => dispatch(setVoiceCallEnabled(checked))} />
<span>{t('settings.voice_call.enable')}</span>
<AntTooltip title={t('settings.voice_call.enable.help')}>
<InfoCircleOutlined style={{ color: 'var(--color-text-3)' }} />
</AntTooltip>
</Space>
</Form.Item>
{/* 模型选择 */}
<Form.Item label={t('settings.voice_call.model')} style={{ marginBottom: 16 }}>
<Space>
<Button
onClick={handleSelectModel}
disabled={!voiceCallEnabled}
icon={
voiceCallModel ? (
<ModelIcon
src={getModelLogo(voiceCallModel.id)}
alt="Model logo"
style={{ width: 20, height: 20, borderRadius: 10 }}
/>
) : (
<PhoneOutlined style={{ marginRight: 8 }} />
)
}>
{voiceCallModel ? voiceCallModel.name : t('settings.voice_call.model.select')}
</Button>
{voiceCallModel && (
<InfoText>{t('settings.voice_call.model.current', { model: voiceCallModel.name })}</InfoText>
)}
</Space>
<InfoText>{t('settings.voice_call.model.info')}</InfoText>
</Form.Item>
{/* 提示词设置 */}
<Form.Item label={t('settings.voice_call.prompt.label')} style={{ marginBottom: 16 }}>
<Input.TextArea
value={promptText}
onChange={(e) => setPromptText(e.target.value)}
disabled={!voiceCallEnabled}
rows={8}
placeholder={t('settings.voice_call.prompt.placeholder')}
/>
<InfoText>{t('settings.voice_call.prompt.info')}</InfoText>
<Space style={{ marginTop: 8 }}>
<Button type="primary" onClick={handleSavePrompt} disabled={!voiceCallEnabled}>
{t('settings.voice_call.prompt.save')}
</Button>
<Button onClick={handleResetPrompt} disabled={!voiceCallEnabled} icon={<ReloadOutlined />}>
{t('settings.voice_call.prompt.reset')}
</Button>
</Space>
</Form.Item>
</Form>
</Container>
)
}
const Container = styled.div`
padding: 0 0 20px 0;
`
const InfoText = styled.div`
color: var(--color-text-3);
font-size: 12px;
margin-top: 4px;
`
const ModelIcon = styled.img`
width: 20px;
height: 20px;
border-radius: 10px;
margin-top: 4px;
`
// const Alert = styled.div<{ type: 'info' | 'warning' | 'error' | 'success' }>`
// padding: 8px 12px;
// border-radius: 4px;
// background-color: ${(props) =>
// props.type === 'info'
// ? 'var(--color-info-bg)'
// : props.type === 'warning'
// ? 'var(--color-warning-bg)'
// : props.type === 'error'
// ? 'var(--color-error-bg)'
// : 'var(--color-success-bg)'};
// border: 1px solid
// ${(props) =>
// props.type === 'info'
// ? 'var(--color-info-border)'
// : props.type === 'warning'
// ? 'var(--color-warning-border)'
// : props.type === 'error'
// ? 'var(--color-error-border)'
// : 'var(--color-success-border)'};
// color: ${(props) =>
// props.type === 'info'
// ? 'var(--color-info-text)'
// : props.type === 'warning'
// ? 'var(--color-warning-text)'
// : props.type === 'error'
// ? 'var(--color-error-text)'
// : 'var(--color-success-text)'};
// `
export default VoiceCallSettings
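
The selectors above imply the following persisted fields in the settings slice (a sketch of the assumed state shape; `Model` is the type from @renderer/types used elsewhere in this diff):

interface VoiceCallSettingsState {
  voiceCallEnabled: boolean // defaults to true via the ?? fallback above
  voiceCallModel: Model | null
  voiceCallPrompt: string | null // null means: fall back to DEFAULT_VOICE_CALL_PROMPT
}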

View File

@ -0,0 +1,152 @@
import i18n from '@renderer/i18n'
// 使用window.electron而不是直接导入electron模块
// 这样可以避免__dirname不可用的问题
class ASRServerService {
private serverProcess: any = null
private isServerRunning = false
/**
* 启动ASR服务器
* @returns Promise<boolean>
*/
startServer = async (): Promise<boolean> => {
if (this.isServerRunning) {
console.log('[ASRServerService] 服务器已经在运行中')
// 安全地调用window.message
if (window.message) {
window.message.info({ content: i18n.t('settings.asr.server.already_running'), key: 'asr-server' })
}
return true
}
try {
console.log('[ASRServerService] 正在启动ASR服务器...')
// 安全地调用window.message
if (window.message) {
window.message.loading({ content: i18n.t('settings.asr.server.starting'), key: 'asr-server' })
}
// 使用IPC调用主进程启动服务器
const result = await window.api.asrServer.startServer()
if (result.success) {
this.isServerRunning = true
this.serverProcess = result.pid
console.log('[ASRServerService] ASR服务器启动成功PID:', result.pid)
if (window.message) {
window.message.success({ content: i18n.t('settings.asr.server.started'), key: 'asr-server' })
}
return true
} else {
console.error('[ASRServerService] ASR服务器启动失败:', result.error)
if (window.message) {
window.message.error({
content: i18n.t('settings.asr.server.start_failed') + ': ' + result.error,
key: 'asr-server'
})
}
return false
}
} catch (error) {
console.error('[ASRServerService] 启动ASR服务器时出错:', error)
if (window.message) {
window.message.error({
content: i18n.t('settings.asr.server.start_failed') + ': ' + (error as Error).message,
key: 'asr-server'
})
}
return false
}
}
/**
* 停止ASR服务器
* @returns Promise<boolean>
*/
stopServer = async (): Promise<boolean> => {
if (!this.isServerRunning || !this.serverProcess) {
console.log('[ASRServerService] 服务器未运行')
if (window.message) {
window.message.info({ content: i18n.t('settings.asr.server.not_running'), key: 'asr-server' })
}
return true
}
try {
console.log('[ASRServerService] 正在停止ASR服务器...')
if (window.message) {
window.message.loading({ content: i18n.t('settings.asr.server.stopping'), key: 'asr-server' })
}
// 使用IPC调用主进程停止服务器
const result = await window.api.asrServer.stopServer(this.serverProcess)
if (result.success) {
this.isServerRunning = false
this.serverProcess = null
console.log('[ASRServerService] ASR服务器已停止')
if (window.message) {
window.message.success({ content: i18n.t('settings.asr.server.stopped'), key: 'asr-server' })
}
return true
} else {
console.error('[ASRServerService] ASR服务器停止失败:', result.error)
if (window.message) {
window.message.error({
content: i18n.t('settings.asr.server.stop_failed') + ': ' + result.error,
key: 'asr-server'
})
}
return false
}
} catch (error) {
console.error('[ASRServerService] 停止ASR服务器时出错:', error)
if (window.message) {
window.message.error({
content: i18n.t('settings.asr.server.stop_failed') + ': ' + (error as Error).message,
key: 'asr-server'
})
}
return false
}
}
/**
* 检查ASR服务器是否正在运行
* @returns boolean
*/
isRunning = (): boolean => {
return this.isServerRunning
}
/**
* 获取ASR服务器网页URL
* @returns string URL
*/
getServerUrl = (): string => {
console.log('[ASRServerService] 获取服务器URL: http://localhost:34515')
return 'http://localhost:34515'
}
/**
* 获取ASR服务器文件路径
* @returns string
*/
getServerFilePath = (): string => {
// 使用相对路径因为window.electron.app.getAppPath()不可用
return process.env.NODE_ENV === 'development'
? 'src/renderer/src/assets/asr-server/server.js'
: 'public/asr-server/server.js'
}
/**
* 打开ASR服务器网页
*/
openServerPage = (): void => {
window.open(this.getServerUrl(), '_blank')
}
}
export default new ASRServerService()
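
A hedged sketch of the preload bridge this service relies on. Only the window.api.asrServer call signatures are taken from the code above; the IPC channel names are assumptions:

import { contextBridge, ipcRenderer } from 'electron'

contextBridge.exposeInMainWorld('api', {
  asrServer: {
    // Resolves with the spawned server's pid on success, per the result handling above.
    startServer: (): Promise<{ success: boolean; pid?: number; error?: string }> =>
      ipcRenderer.invoke('asr-server:start'),
    stopServer: (pid: number): Promise<{ success: boolean; error?: string }> =>
      ipcRenderer.invoke('asr-server:stop', pid)
  }
})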

View File

@ -0,0 +1,656 @@
import i18n from '@renderer/i18n'
import store from '@renderer/store'
/**
* ASR服务
*/
class ASRService {
private mediaRecorder: MediaRecorder | null = null
private audioChunks: Blob[] = []
private isRecording = false
private stream: MediaStream | null = null
// WebSocket相关
private ws: WebSocket | null = null
private wsConnected = false
private browserReady = false
private reconnectAttempt = 0
private maxReconnectAttempts = 5
private reconnectTimeout: NodeJS.Timeout | null = null
/**
* 连接WebSocket服务器
* @returns Promise<boolean>
*/
connectToWebSocketServer = async (): Promise<boolean> => {
return new Promise((resolve) => {
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
console.log('[ASRService] WebSocket已连接')
resolve(true)
return
}
if (this.ws && this.ws.readyState === WebSocket.CONNECTING) {
console.log('[ASRService] WebSocket正在连接中')
// 等待连接完成
this.ws.onopen = () => {
console.log('[ASRService] WebSocket连接成功')
this.wsConnected = true
this.reconnectAttempt = 0
this.ws?.send(JSON.stringify({ type: 'identify', role: 'electron' }))
resolve(true)
}
this.ws.onerror = () => {
console.error('[ASRService] WebSocket连接失败')
this.wsConnected = false
resolve(false)
}
return
}
// 关闭之前的连接
if (this.ws) {
try {
this.ws.close()
} catch (e) {
console.error('[ASRService] 关闭WebSocket连接失败:', e)
}
}
// 创建新连接
try {
console.log('[ASRService] 正在连接WebSocket服务器...')
window.message.loading({ content: '正在连接语音识别服务...', key: 'ws-connect' })
this.ws = new WebSocket('ws://localhost:34515') // 使用正确的端口 34515
this.wsConnected = false
this.browserReady = false
this.ws.onopen = () => {
console.log('[ASRService] WebSocket连接成功')
window.message.success({ content: '语音识别服务连接成功', key: 'ws-connect' })
this.wsConnected = true
this.reconnectAttempt = 0
this.ws?.send(JSON.stringify({ type: 'identify', role: 'electron' }))
resolve(true)
}
this.ws.onclose = () => {
console.log('[ASRService] WebSocket连接关闭')
this.wsConnected = false
this.browserReady = false
this.attemptReconnect()
}
this.ws.onerror = (error) => {
console.error('[ASRService] WebSocket连接错误:', error)
this.wsConnected = false
window.message.error({ content: '语音识别服务连接失败', key: 'ws-connect' })
resolve(false)
}
this.ws.onmessage = this.handleWebSocketMessage
} catch (error) {
console.error('[ASRService] 创建WebSocket连接失败:', error)
window.message.error({ content: '语音识别服务连接失败', key: 'ws-connect' })
resolve(false)
}
})
}
/**
* 处理WebSocket消息
*/
private handleWebSocketMessage = (event: MessageEvent) => {
try {
const data = JSON.parse(event.data)
console.log('[ASRService] 收到WebSocket消息:', data)
if (data.type === 'status') {
if (data.message === 'browser_ready' || data.message === 'Browser connected') {
console.log('[ASRService] 浏览器已准备好')
this.browserReady = true
window.message.success({ content: '语音识别浏览器已准备好', key: 'browser-status' })
} else if (data.message === 'Browser disconnected' || data.message === 'Browser connection error') {
console.log('[ASRService] 浏览器断开连接')
this.browserReady = false
window.message.error({ content: '语音识别浏览器断开连接', key: 'browser-status' })
} else if (data.message === 'stopped') {
// 语音识别已停止
console.log('[ASRService] 语音识别已停止')
this.isRecording = false
// 如果没有收到最终结果,显示处理完成消息
window.message.success({ content: i18n.t('settings.asr.completed'), key: 'asr-processing' })
} else if (data.message === 'reset_complete') {
// 语音识别已重置
console.log('[ASRService] 语音识别已强制重置')
this.isRecording = false
// 保存当前回调函数并立即清除
const tempCallback = this.resultCallback
this.resultCallback = null
// 显示重置完成消息
window.message.info({ content: '语音识别已重置', key: 'asr-reset' })
// 如果有回调函数,调用一次空字符串,触发按钮状态重置
if (tempCallback && typeof tempCallback === 'function') {
// 使用空字符串调用回调,不会影响输入框,但可以触发按钮状态重置
const callback = tempCallback as (text: string, isFinal?: boolean) => void // 明确指定类型
setTimeout(() => {
callback('', false)
}, 100)
}
}
} else if (data.type === 'result' && data.data) {
// 处理识别结果
console.log('[ASRService] 收到识别结果:', data.data)
// 如果已经停止录音但仍然收到结果,检查是否是最终结果
if (!this.isRecording && !data.data.isFinal) {
console.log('[ASRService] 已停止录音但收到非最终结果,忽略')
return
}
if (this.resultCallback && typeof this.resultCallback === 'function') {
// 将所有结果都传递给回调函数并包含isFinal状态
if (data.data.text && data.data.text.trim()) {
if (data.data.isFinal) {
console.log('[ASRService] 收到最终结果,调用回调函数,文本:', data.data.text)
// 不再清除回调函数,允许继续处理后续语音
// const tempCallback = this.resultCallback
// this.resultCallback = null
// 直接调用回调函数
this.resultCallback(data.data.text, true)
window.message.success({ content: i18n.t('settings.asr.success'), key: 'asr-processing' })
} else if (this.isRecording) {
// 只在录音中才处理中间结果
// 非最终结果,也调用回调,但标记为非最终
console.log('[ASRService] 收到中间结果,调用回调函数,文本:', data.data.text)
this.resultCallback(data.data.text, false)
}
} else {
console.log('[ASRService] 识别结果为空,不调用回调')
}
} else {
console.warn('[ASRService] 没有设置结果回调函数')
}
} else if (data.type === 'error') {
console.error('[ASRService] 收到错误消息:', data.message || data.data)
window.message.error({
content: `语音识别错误: ${data.message || data.data?.error || '未知错误'}`,
key: 'asr-error'
})
}
} catch (error) {
console.error('[ASRService] 解析WebSocket消息失败:', error, event.data)
}
}
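
The WebSocket protocol implied by the handler above, written out as a discriminated union for reference (an inferred sketch, not an authoritative spec):

type ServerMessage =
  | { type: 'status'; message: 'browser_ready' | 'Browser connected' | 'Browser disconnected' | 'stopped' | 'reset_complete' | string }
  | { type: 'result'; data: { text: string; isFinal: boolean } }
  | { type: 'error'; message?: string; data?: { error?: string } }

// Messages sent by this client are { type: 'identify', role: 'electron' },
// { type: 'start', language } and { type: 'stop' }, per the methods below.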
/**
* 尝试重连WebSocket服务器
*/
private attemptReconnect = () => {
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout)
this.reconnectTimeout = null
}
if (this.reconnectAttempt >= this.maxReconnectAttempts) {
console.log('[ASRService] 达到最大重连次数,停止重连')
return
}
const delay = Math.min(1000 * Math.pow(2, this.reconnectAttempt), 30000)
console.log(
`[ASRService] 将在 ${delay}ms 后尝试重连 (尝试 ${this.reconnectAttempt + 1}/${this.maxReconnectAttempts})`
)
this.reconnectTimeout = setTimeout(() => {
this.reconnectAttempt++
this.connectToWebSocketServer().catch(console.error)
}, delay)
}
// 存储结果回调函数
resultCallback: ((text: string, isFinal?: boolean) => void) | null = null
startRecording = async (onTranscribed?: (text: string, isFinal?: boolean) => void): Promise<void> => {
try {
const { asrEnabled, asrServiceType } = store.getState().settings
if (!asrEnabled) {
window.message.error({ content: i18n.t('settings.asr.error.not_enabled'), key: 'asr-error' })
return
}
// 检查是否已经在录音
if (this.isRecording) {
console.log('已经在录音中,忽略此次请求')
return
}
// 先设置回调函数,确保在任何情况下都能正确设置
if (onTranscribed && typeof onTranscribed === 'function') {
console.log('[ASRService] 设置结果回调函数')
this.resultCallback = onTranscribed
} else {
console.warn('[ASRService] 未提供有效的回调函数')
}
// 如果是使用本地服务器
if (asrServiceType === 'local') {
// 连接WebSocket服务器
const connected = await this.connectToWebSocketServer()
if (!connected) {
throw new Error('无法连接到语音识别服务')
}
// 获取语言设置
const { asrLanguage } = store.getState().settings
// 检查浏览器是否准备好
if (!this.browserReady) {
// 尝试等待浏览器准备好
let waitAttempts = 0
const maxWaitAttempts = 5
// 尝试打开浏览器页面
try {
// 发送消息提示用户
window.message.info({
content: '正在准备语音识别服务...',
key: 'browser-status'
})
// 尝试自动打开浏览器页面
try {
// 使用ASRServerService获取服务器URL
const serverUrl = 'http://localhost:34515' // 使用正确的端口 34515
console.log('尝试打开语音识别服务器页面:', serverUrl)
window.open(serverUrl, '_blank')
} catch (error) {
console.error('获取服务器URL失败:', error)
}
} catch (error) {
console.error('打开语音识别浏览器页面失败:', error)
}
while (!this.browserReady && waitAttempts < maxWaitAttempts) {
window.message.loading({
content: `等待浏览器准备就绪 (${waitAttempts + 1}/${maxWaitAttempts})...`,
key: 'browser-status'
})
// 等待一秒
await new Promise((resolve) => setTimeout(resolve, 1000))
waitAttempts++
}
if (!this.browserReady) {
window.message.warning({
content: '语音识别浏览器尚未准备好,请确保已打开浏览器页面',
key: 'browser-status'
})
throw new Error('浏览器尚未准备好')
}
}
// 发送开始命令
if (this.ws && this.wsConnected) {
// 将语言设置传递给服务器
this.ws.send(
JSON.stringify({
type: 'start',
language: asrLanguage || 'zh-CN' // 使用设置的语言或默认中文
})
)
this.isRecording = true
console.log('开始语音识别,语言:', asrLanguage || 'zh-CN')
window.message.info({ content: i18n.t('settings.asr.recording'), key: 'asr-recording' })
} else {
throw new Error('WebSocket连接未就绪')
}
return
}
// 以下是原有的录音逻辑OpenAI或浏览器API
// 请求麦克风权限
this.stream = await navigator.mediaDevices.getUserMedia({ audio: true })
// 创建MediaRecorder实例
this.mediaRecorder = new MediaRecorder(this.stream)
// 清空之前的录音数据
this.audioChunks = []
// 设置数据可用时的回调
this.mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
this.audioChunks.push(event.data)
}
}
// 开始录音
this.mediaRecorder.start()
this.isRecording = true
console.log('开始录音')
window.message.info({ content: i18n.t('settings.asr.recording'), key: 'asr-recording' })
} catch (error) {
console.error('开始录音失败:', error)
window.message.error({
content: i18n.t('settings.asr.error.start_failed') + ': ' + (error as Error).message,
key: 'asr-error'
})
this.isRecording = false
}
}
/**
 * Stop recording and transcribe the captured audio
 * @param onTranscribed Callback that receives the transcription result
 * @returns Promise<void>
 */
stopRecording = async (onTranscribed: (text: string, isFinal?: boolean) => void): Promise<void> => {
const { asrServiceType } = store.getState().settings
// 如果是使用本地服务器
if (asrServiceType === 'local') {
if (!this.isRecording) {
console.log('没有正在进行的语音识别')
return
}
try {
// 保存回调函数
this.resultCallback = onTranscribed
// 发送停止命令
if (this.ws && this.wsConnected) {
this.ws.send(JSON.stringify({ type: 'stop' }))
console.log('停止语音识别')
window.message.loading({ content: i18n.t('settings.asr.processing'), key: 'asr-processing' })
// 立即调用回调函数,使按钮状态立即更新
if (onTranscribed) {
// 使用空字符串调用回调,不会影响输入框,但可以触发按钮状态重置
// 传递false表示这不是最终结果只是状态更新
setTimeout(() => {
onTranscribed('', false)
}, 100)
}
// 不再清除回调函数,允许连续说多句话
// setTimeout(() => {
// // 在停止后的一段时间内清除回调,防止后续结果被处理
// this.resultCallback = null
// }, 3000) // 3秒后清除回调
} else {
throw new Error('WebSocket连接未就绪')
}
// 重置录音状态
this.isRecording = false
} catch (error) {
console.error('停止语音识别失败:', error)
window.message.error({
content: i18n.t('settings.asr.error.transcribe_failed') + ': ' + (error as Error).message,
key: 'asr-processing'
})
this.isRecording = false
}
return
}
// Legacy stop-recording logic below (OpenAI or browser Web Speech API)
if (!this.isRecording || !this.mediaRecorder) {
console.log('没有正在进行的录音')
return
}
try {
// 创建一个Promise等待录音结束
const recordingEndedPromise = new Promise<Blob>((resolve) => {
if (this.mediaRecorder) {
this.mediaRecorder.onstop = () => {
// 将所有音频块合并为一个Blob
const audioBlob = new Blob(this.audioChunks, { type: 'audio/webm' })
resolve(audioBlob)
}
// 停止录音
this.mediaRecorder.stop()
}
})
// 停止所有轨道
if (this.stream) {
this.stream.getTracks().forEach((track) => track.stop())
this.stream = null
}
// 等待录音结束并获取音频Blob
const audioBlob = await recordingEndedPromise
// 重置录音状态
this.isRecording = false
this.mediaRecorder = null
console.log('录音结束,音频大小:', audioBlob.size, 'bytes')
// 显示处理中消息
window.message.loading({ content: i18n.t('settings.asr.processing'), key: 'asr-processing' })
if (asrServiceType === 'openai') {
// 使用OpenAI的Whisper API进行语音识别
await this.transcribeWithOpenAI(audioBlob, onTranscribed)
} else if (asrServiceType === 'browser') {
// 使用浏览器的Web Speech API进行语音识别
await this.transcribeWithBrowser(audioBlob, onTranscribed)
} else {
throw new Error(`不支持的ASR服务类型: ${asrServiceType}`)
}
} catch (error) {
console.error('停止录音或转录失败:', error)
window.message.error({
content: i18n.t('settings.asr.error.transcribe_failed') + ': ' + (error as Error).message,
key: 'asr-processing'
})
// 重置录音状态
this.isRecording = false
this.mediaRecorder = null
if (this.stream) {
this.stream.getTracks().forEach((track) => track.stop())
this.stream = null
}
}
}
/**
 * Transcribe audio using OpenAI's Whisper API
 * @param audioBlob Recorded audio as a Blob
 * @param onTranscribed Callback that receives the transcription result
 * @returns Promise<void>
 */
private transcribeWithOpenAI = async (audioBlob: Blob, onTranscribed: (text: string) => void): Promise<void> => {
try {
const { asrApiKey, asrApiUrl, asrModel } = store.getState().settings
if (!asrApiKey) {
throw new Error(i18n.t('settings.asr.error.no_api_key'))
}
// 创建FormData对象
const formData = new FormData()
formData.append('file', audioBlob, 'recording.webm')
formData.append('model', asrModel || 'whisper-1')
// 调用OpenAI API
const response = await fetch(asrApiUrl, {
method: 'POST',
headers: {
Authorization: `Bearer ${asrApiKey}`
},
body: formData
})
if (!response.ok) {
const errorData = await response.json()
throw new Error(errorData.error?.message || 'OpenAI语音识别失败')
}
// 解析响应
const data = await response.json()
const transcribedText = data.text
if (transcribedText) {
console.log('语音识别成功:', transcribedText)
window.message.success({ content: i18n.t('settings.asr.success'), key: 'asr-processing' })
onTranscribed(transcribedText)
} else {
throw new Error('未能识别出文本')
}
} catch (error) {
console.error('OpenAI语音识别失败:', error)
throw error
}
}
/**
 * Transcribe audio using the browser's Web Speech API
 * @param audioBlob Recorded audio as a Blob (unused; see note below)
 * @param onTranscribed Callback that receives the transcription result
 * @returns Promise<void>
 */
private transcribeWithBrowser = async (_audioBlob: Blob, onTranscribed: (text: string) => void): Promise<void> => {
try {
// 检查浏览器是否支持Web Speech API
if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
throw new Error(i18n.t('settings.asr.error.browser_not_support'))
}
// 由于Web Speech API不支持直接处理录制的音频这里我们只是模拟一个成功的回调
// 实际上使用Web Speech API时应该直接使用SpeechRecognition对象进行实时识别
// 这里简化处理,实际项目中可能需要更复杂的实现
window.message.success({ content: i18n.t('settings.asr.success'), key: 'asr-processing' })
onTranscribed('浏览器语音识别功能尚未完全实现')
} catch (error) {
console.error('浏览器语音识别失败:', error)
throw error
}
}
/**
 * Whether a recording is currently in progress
 * @returns boolean
 */
isCurrentlyRecording = (): boolean => {
return this.isRecording
}
/**
 * Whether the WebSocket is connected and the browser relay page is ready
 * @returns boolean
 */
isWebSocketConnected = (): boolean => {
return this.wsConnected && this.browserReady
}
/**
 * Cancel the current recording without producing a transcription
 */
cancelRecording = (): void => {
const { asrServiceType } = store.getState().settings
// 如果是使用本地服务器
if (asrServiceType === 'local') {
// 修改条件,即使不在录音中也进行重置
if (this.isRecording || this.resultCallback) {
// 先重置状态和回调,确保不会处理后续结果
this.isRecording = false
this.resultCallback = null
// 发送停止命令
if (this.ws && this.wsConnected) {
this.ws.send(JSON.stringify({ type: 'stop' }))
console.log('发送停止命令到WebSocket服务器')
// 发送一个额外的命令,要求浏览器强制重置语音识别
setTimeout(() => {
if (this.ws && this.wsConnected) {
this.ws.send(JSON.stringify({ type: 'reset' }))
console.log('发送重置命令到WebSocket服务器')
}
}, 100)
}
console.log('语音识别已取消')
window.message.info({ content: i18n.t('settings.asr.canceled'), key: 'asr-recording' })
}
return
}
// Legacy cancel-recording logic below (OpenAI or browser Web Speech API)
if (this.isRecording && this.mediaRecorder) {
// 停止MediaRecorder
this.mediaRecorder.stop()
// 停止所有轨道
if (this.stream) {
this.stream.getTracks().forEach((track) => track.stop())
this.stream = null
}
// 重置状态
this.isRecording = false
this.mediaRecorder = null
this.audioChunks = []
console.log('录音已取消')
window.message.info({ content: i18n.t('settings.asr.canceled'), key: 'asr-recording' })
}
}
/**
 * Close the WebSocket connection and reset connection state
 */
closeWebSocketConnection = (): void => {
if (this.ws) {
try {
this.ws.close()
} catch (e) {
console.error('[ASRService] 关闭WebSocket连接失败:', e)
}
this.ws = null
}
this.wsConnected = false
this.browserReady = false
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout)
this.reconnectTimeout = null
}
}
/**
 * Open the ASR relay page in the default browser
 */
openBrowserPage = (): void => {
// 使用window.open打开浏览器页面
window.open('http://localhost:34515', '_blank') // 使用正确的端口 34515
}
}
// 创建单例实例
const instance = new ASRService()
export default instance
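
// --- Illustrative usage sketch (not part of the service itself) ---
// A minimal example of how a push-to-talk input component might drive this
// service. `setInputText` is a hypothetical state setter; the service API
// (startRecording / stopRecording / cancelRecording) is as defined above.
//
// import ASRService from '@renderer/services/ASRService'
//
// const handleVoiceButtonDown = async () => {
//   await ASRService.startRecording((text, isFinal) => {
//     // Intermediate results arrive with isFinal === false
//     setInputText(text)
//   })
// }
//
// const handleVoiceButtonUp = async () => {
//   await ASRService.stopRecording((text, isFinal) => {
//     if (isFinal && text) setInputText(text)
//   })
// }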


@@ -25,5 +25,6 @@ export const EVENT_NAMES = {
 ADD_NEW_TOPIC: 'ADD_NEW_TOPIC',
 RESEND_MESSAGE: 'RESEND_MESSAGE',
 SHOW_MODEL_SELECTOR: 'SHOW_MODEL_SELECTOR',
-QUOTE_TEXT: 'QUOTE_TEXT'
+QUOTE_TEXT: 'QUOTE_TEXT',
+VOICE_CALL_MESSAGE: 'VOICE_CALL_MESSAGE'
 }


@@ -0,0 +1,67 @@
/**
 * Legacy TTS entry point. Use src/renderer/src/services/tts/TTSService.ts instead;
 * this file is kept only for backward compatibility.
 */
import { Message } from '@renderer/types'
import { TTSService as NewTTSService } from './tts/index'
/**
 * TTS service (legacy wrapper)
 * @deprecated Use src/renderer/src/services/tts/TTSService.ts instead
 */
class TTSService {
private service = NewTTSService.getInstance()
/**
 * Speak the given text
 * @param text Text to read aloud
 * @param segmented Whether to use segmented playback
 */
speak = async (text: string, segmented: boolean = false): Promise<void> => {
await this.service.speak(text, segmented)
}
/**
 * Stop playback
 */
stop = (): void => {
this.service.stop()
}
/**
 * Speak the content of a message
 * @param message Message whose content should be read aloud
 * @param segmented Whether to use segmented playback
 */
speakFromMessage = async (message: Message, segmented: boolean = false): Promise<void> => {
await this.service.speakFromMessage(message, segmented)
}
/**
 * Whether TTS is currently playing
 */
isCurrentlyPlaying = (): boolean => {
return this.service.isCurrentlyPlaying()
}
/**
 * Start playback from a specific segment
 * @param segmentIndex Index of the segment to start from
 */
playFromSegment = (segmentIndex: number): void => {
this.service.playFromSegment(segmentIndex)
}
/**
 * Seek to a position in the current audio
 * @param time Target time in seconds
 */
seek = (time: number): void => {
this.service.seek(time)
}
}
// 导出单例
export default new TTSService()


@@ -0,0 +1,984 @@
import { DEFAULT_VOICE_CALL_PROMPT } from '@renderer/config/prompts'
import { fetchChatCompletion } from '@renderer/services/ApiService'
import ASRService from '@renderer/services/ASRService'
import { getDefaultAssistant } from '@renderer/services/AssistantService'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import { getAssistantMessage, getUserMessage } from '@renderer/services/MessagesService'
import TTSService from '@renderer/services/TTSService'
import store from '@renderer/store'
import { setSkipNextAutoTTS } from '@renderer/store/settings'
// 导入类型
import type { Message } from '@renderer/types'
import i18n from 'i18next'
interface VoiceCallCallbacks {
onTranscript: (text: string) => void
onResponse: (text: string) => void
onListeningStateChange: (isListening: boolean) => void
onSpeakingStateChange: (isSpeaking: boolean) => void
}
// 为TypeScript添加SpeechRecognition类型
declare global {
interface Window {
SpeechRecognition: any
webkitSpeechRecognition: any
}
}
class VoiceCallServiceClass {
private recognition: any = null
private isCallActive = false
private isRecording = false // 新增录音状态
private isMuted = false
private isPaused = false
private callbacks: VoiceCallCallbacks | null = null
private _currentTranscript = '' // 使用下划线前缀避免未使用警告
private _accumulatedTranscript = '' // 累积的语音识别结果
private conversationHistory: { role: string; content: string }[] = []
private isProcessingResponse = false
private ttsService = TTSService
private recordingTimeout: NodeJS.Timeout | null = null // 录音超时定时器
async initialize() {
// 检查麦克风权限
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
stream.getTracks().forEach((track) => track.stop())
} catch (error) {
console.error('Microphone permission denied:', error)
throw new Error('Microphone permission denied')
}
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
// 如果使用浏览器ASR检查浏览器支持
if (asrServiceType === 'browser') {
if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
throw new Error('Speech recognition not supported in this browser')
}
// 初始化浏览器语音识别
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
this.recognition = new SpeechRecognition()
this.recognition.continuous = true
this.recognition.interimResults = true
this.recognition.lang = navigator.language || 'zh-CN'
} else if (asrServiceType === 'local') {
// 如果使用本地服务器ASR检查连接
try {
// 尝试连接本地ASR服务器
console.log('初始化时尝试连接语音识别服务器')
const connected = await ASRService.connectToWebSocketServer()
if (!connected) {
console.warn('无法连接到语音识别服务,将在需要时重试')
// 不抛出异常,允许程序继续运行,在需要时重试
} else {
console.log('语音识别服务器连接成功')
}
} catch (error) {
console.error('连接语音识别服务器失败:', error)
// 不抛出异常,允许程序继续运行,在需要时重试
}
}
return true
}
async startCall(callbacks: VoiceCallCallbacks) {
this.callbacks = callbacks
this.isCallActive = true
this.conversationHistory = []
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
// 如果是本地服务器ASR预先连接服务器
if (asrServiceType === 'local') {
try {
// 尝试连接WebSocket服务器
console.log('通话开始,预先连接语音识别服务器')
const connected = await ASRService.connectToWebSocketServer()
if (!connected) {
console.warn('无法连接到语音识别服务器,将在需要时重试')
} else {
console.log('语音识别服务器连接成功')
}
} catch (error) {
console.error('连接语音识别服务器失败:', error)
}
}
// 根据不同的ASR服务类型进行初始化
if (asrServiceType === 'browser') {
if (!this.recognition) {
throw new Error('Browser speech recognition not initialized')
}
// 设置浏览器语音识别事件处理
this.recognition.onresult = (event: any) => {
let interimTranscript = ''
let finalTranscript = ''
for (let i = event.resultIndex; i < event.results.length; ++i) {
if (event.results[i].isFinal) {
finalTranscript += event.results[i][0].transcript
} else {
interimTranscript += event.results[i][0].transcript
}
}
if (interimTranscript) {
// 更新当前的临时识别结果
this._currentTranscript = interimTranscript
// 显示累积结果 + 当前临时结果
this.callbacks?.onTranscript(this._accumulatedTranscript + ' ' + interimTranscript)
}
if (finalTranscript) {
// 将最终结果累积到总结果中
if (this._accumulatedTranscript) {
// 如果已经有累积的文本,添加空格再追加
this._accumulatedTranscript += ' ' + finalTranscript
} else {
// 如果是第一段文本,直接设置
this._accumulatedTranscript = finalTranscript
}
// 更新当前的识别结果
this._currentTranscript = ''
// 显示累积的完整结果
this.callbacks?.onTranscript(this._accumulatedTranscript)
// 在录音过程中只更新transcript不触发handleUserSpeech
// 松开按钮后才会处理完整的录音内容
}
}
this.recognition.onstart = () => {
this.isRecording = true
this.callbacks?.onListeningStateChange(true)
}
this.recognition.onend = () => {
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
}
this.recognition.onerror = (event: any) => {
console.error('Speech recognition error', event.error)
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
}
}
// 设置skipNextAutoTTS为true防止自动播放最后一条消息
store.dispatch(setSkipNextAutoTTS(true))
// 播放欢迎语音 - 根据当前语言获取本地化的欢迎消息
const welcomeMessage = i18n.t('settings.voice_call.welcome_message')
// 不调用onResponse避免触发两次TTS播放
// this.callbacks?.onResponse(welcomeMessage)
// 监听TTS状态
const ttsStateHandler = (isPlaying: boolean) => {
this.callbacks?.onSpeakingStateChange(isPlaying)
}
// 监听TTS播放状态
window.addEventListener('tts-state-change', (event: any) => {
ttsStateHandler(event.detail.isPlaying)
})
// 播放欢迎语音,并手动设置初始状态
this.callbacks?.onSpeakingStateChange(true)
this.ttsService.speak(welcomeMessage)
// 确保欢迎语音结束后状态正确
setTimeout(() => {
if (this.ttsService && !this.ttsService.isCurrentlyPlaying()) {
this.callbacks?.onSpeakingStateChange(false)
}
}, 5000) // 5秒后检查TTS状态
return true
}
/**
 * Start recording (push-to-talk pressed)
 * @returns Promise<boolean> whether recording actually started
 */
async startRecording(): Promise<boolean> {
if (!this.isCallActive || this.isPaused || this.isProcessingResponse || this.isRecording) {
return false
}
// 重置累积的文本
this._accumulatedTranscript = ''
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
try {
if (asrServiceType === 'browser') {
// 浏览器ASR
if (!this.recognition) {
throw new Error('Browser speech recognition not initialized')
}
this.recognition.start()
this.isRecording = true
} else if (asrServiceType === 'local') {
// 本地服务器ASR
try {
// 先检查连接状态,如果未连接则尝试重新连接
if (!ASRService.isWebSocketConnected()) {
console.log('语音识别服务器未连接,尝试重新连接')
const connected = await ASRService.connectToWebSocketServer()
if (!connected) {
throw new Error('无法连接到语音识别服务器')
}
// 等待一下,确保连接已建立
await new Promise((resolve) => setTimeout(resolve, 500))
}
// 开始录音
await ASRService.startRecording((text, isFinal) => {
if (text) {
if (isFinal) {
// 如果是最终结果,累积到总结果中
if (this._accumulatedTranscript) {
// 如果已经有累积的文本,添加空格再追加
this._accumulatedTranscript += ' ' + text
} else {
// 如果是第一段文本,直接设置
this._accumulatedTranscript = text
}
// 更新当前的识别结果
this._currentTranscript = ''
// 显示累积的完整结果
this.callbacks?.onTranscript(this._accumulatedTranscript)
} else {
// 如果是临时结果,更新当前的识别结果
this._currentTranscript = text
// 只显示当前临时结果,不与累积结果拼接
this.callbacks?.onTranscript(text)
}
// 在录音过程中只更新transcript不触发handleUserSpeech
// 松开按钮后才会处理完整的录音内容
}
})
this.isRecording = true
this.callbacks?.onListeningStateChange(true)
} catch (error) {
console.error('启动语音识别失败:', error)
throw error
}
} else if (asrServiceType === 'openai') {
// OpenAI ASR
await ASRService.startRecording()
this.isRecording = true
this.callbacks?.onListeningStateChange(true)
}
// 设置最长录音时间,防止用户忘记松开
this.recordingTimeout = setTimeout(() => {
if (this.isRecording) {
this.stopRecording()
}
}, 60000) // 60秒最长录音时间
return true
} catch (error) {
console.error('Failed to start recording:', error)
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
return false
}
}
/**
 * Stop recording and send the recognized text to the AI
 * @returns Promise<boolean> whether the stop was handled
 */
async stopRecording(): Promise<boolean> {
if (!this.isCallActive || !this.isRecording) {
return false
}
// 清除录音超时定时器
if (this.recordingTimeout) {
clearTimeout(this.recordingTimeout)
this.recordingTimeout = null
}
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
try {
// 立即设置录音状态为false防止重复处理
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
// 存储当前的语音识别结果用于松开按钮后发送给AI
const currentTranscript = this._currentTranscript
// 存储累积的语音识别结果
const accumulatedTranscript = this._accumulatedTranscript
if (asrServiceType === 'browser') {
// 浏览器ASR
if (!this.recognition) {
throw new Error('Browser speech recognition not initialized')
}
this.recognition.stop()
// 优先使用累积的文本,如果有的话
if (accumulatedTranscript) {
console.log('发送累积的语音识别结果给AI:', accumulatedTranscript)
this.handleUserSpeech(accumulatedTranscript)
} else if (currentTranscript) {
// 如果没有累积结果,使用当前结果
console.log('没有累积结果,使用当前结果:', currentTranscript)
this.handleUserSpeech(currentTranscript)
}
// 清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
} else if (asrServiceType === 'local') {
// 本地服务器ASR
// 创建一个承诺,等待最终结果
const finalResultPromise = new Promise<string>((resolve) => {
// 设置一个超时器,确保不会无限等待
const timeoutId = setTimeout(() => {
console.log('等待最终结果超时,使用当前结果')
resolve(this._currentTranscript)
}, 1500) // 1.5秒超时
// 设置回调函数来接收最终结果
const resultCallback = (text: string, isFinal?: boolean) => {
// 如果是空字符串,表示只是重置状态,不处理
if (text === '') return
if (text) {
// 只处理最终结果,忽略中间结果
if (isFinal) {
clearTimeout(timeoutId)
console.log('收到最终语音识别结果:', text)
this._currentTranscript = text
this.callbacks?.onTranscript(text)
resolve(text)
} else {
// 对于中间结果只更新显示不解析Promise
console.log('收到中间语音识别结果:', text)
this.callbacks?.onTranscript(text)
}
}
}
// 停止录音,但不取消,以获取最终结果
ASRService.stopRecording(resultCallback)
// 添加额外的安全措施,在停止后立即发送重置命令
setTimeout(() => {
// 发送重置命令,确保浏览器不会继续发送结果
ASRService.cancelRecording()
// 清除ASRService中的回调函数防止后续结果被处理
ASRService.resultCallback = null
}, 2000) // 2秒后强制取消作为安全措施
})
// 等待最终结果但最多等待3秒
const finalText = await finalResultPromise
// 优先使用累积的文本,如果有的话
if (accumulatedTranscript) {
console.log('发送累积的语音识别结果给AI:', accumulatedTranscript)
this.handleUserSpeech(accumulatedTranscript)
} else if (finalText) {
// 如果没有累积结果,使用最终结果
console.log('发送最终语音识别结果给AI:', finalText)
this.handleUserSpeech(finalText)
} else if (currentTranscript) {
// 如果没有最终结果,使用当前结果
console.log('没有最终结果,使用当前结果:', currentTranscript)
this.handleUserSpeech(currentTranscript)
}
// 再次确保所有状态被重置
this._currentTranscript = ''
this._accumulatedTranscript = ''
} else if (asrServiceType === 'openai') {
// OpenAI ASR
await ASRService.stopRecording((text) => {
// 更新最终的语音识别结果
if (text) {
this._currentTranscript = text
this.callbacks?.onTranscript(text)
}
})
// 使用最新的语音识别结果
const finalTranscript = this._currentTranscript
if (finalTranscript) {
this.handleUserSpeech(finalTranscript)
}
// 清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
}
return true
} catch (error) {
console.error('Failed to stop recording:', error)
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
// 确保在出错时也清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
// 强制取消录音
ASRService.cancelRecording()
return false
}
}
/**
 * Handle recognized user speech
 * @param text Recognized text
 * @param sendToChat Whether to forward the text to the chat view instead of answering in the call window
 */
async handleUserSpeech(text: string, sendToChat: boolean = false) {
if (!this.isCallActive || this.isProcessingResponse || this.isPaused) return
// 暂停语音识别避免在AI回复时继续识别
const { asrServiceType } = store.getState().settings
if (asrServiceType === 'browser') {
this.recognition?.stop()
} else if (asrServiceType === 'local' || asrServiceType === 'openai') {
ASRService.cancelRecording()
}
this.isProcessingResponse = true
try {
// 获取当前助手
const assistant = getDefaultAssistant()
// 检查是否有自定义模型
const { voiceCallModel } = store.getState().settings
if (voiceCallModel) {
// 如果有自定义模型,覆盖默认助手的模型
assistant.model = voiceCallModel
console.log('设置语音通话专用模型:', JSON.stringify(voiceCallModel))
} else {
console.log('没有设置语音通话专用模型,使用默认助手模型:', JSON.stringify(assistant.model))
}
// 如果需要发送到聊天界面,触发事件
if (sendToChat) {
console.log('将语音识别结果发送到聊天界面:', text)
try {
// 获取语音通话专用模型
const { voiceCallModel } = store.getState().settings
// 打印日志查看模型信息
console.log('语音通话专用模型:', voiceCallModel ? JSON.stringify(voiceCallModel) : 'null')
console.log('助手模型:', assistant.model ? JSON.stringify(assistant.model) : 'null')
// 准备要发送的模型
const modelToUse = voiceCallModel || assistant.model
// 确保模型对象完整
if (modelToUse && typeof modelToUse === 'object') {
console.log('使用完整模型对象:', modelToUse.name || modelToUse.id)
} else {
console.error('模型对象不完整或不存在')
}
// 直接触发事件,将语音识别结果发送到聊天界面
// 优先使用语音通话专用模型,而不是助手模型
const eventData = {
text,
model: modelToUse,
isVoiceCall: true, // 标记这是语音通话消息
useVoiceCallModel: true, // 明确标记使用语音通话模型
voiceCallModelId: voiceCallModel?.id // 传递语音通话模型ID
}
// 打印完整的事件数据
console.log('发送语音通话消息事件数据:', JSON.stringify(eventData))
// 发送事件
EventEmitter.emit(EVENT_NAMES.VOICE_CALL_MESSAGE, eventData)
// 打印日志确认事件已触发
console.log(
'事件已触发,消息内容:',
text,
'模型:',
voiceCallModel ? voiceCallModel.name : assistant.model?.name
)
// 使用消息通知用户
window.message.success({ content: '语音识别已完成,正在发送消息...', key: 'voice-call-send' })
} catch (error) {
console.error('发送语音识别结果到聊天界面时出错:', error)
window.message.error({ content: '发送语音识别结果失败', key: 'voice-call-error' })
}
// 不在这里处理响应,因为聊天界面会处理
this.isProcessingResponse = false
return
}
// 以下是原有的处理逻辑,用于独立的语音通话窗口
// 创建一个简单的Topic对象
const topic = {
id: 'voice-call',
assistantId: assistant.id,
name: 'Voice Call',
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
messages: []
}
// 创建用户消息
const userMessage = getUserMessage({
assistant,
topic,
type: 'text',
content: text
})
// 创建助手消息
const assistantMessage = getAssistantMessage({
assistant,
topic
})
// 更新对话历史
this.conversationHistory.push({ role: 'user', content: text })
// 构建消息列表
// 将历史消息转换为正确的Message对象
const historyMessages = this.conversationHistory.map((msg) => {
if (msg.role === 'user') {
return getUserMessage({
assistant,
topic,
type: 'text',
content: msg.content
})
} else {
const assistantMsg = getAssistantMessage({
assistant,
topic
})
return { ...assistantMsg, content: msg.content, status: 'success' }
}
})
// 获取用户自定义提示词
const { voiceCallPrompt } = store.getState().settings
// 使用自定义提示词或默认提示词
const promptToUse = voiceCallPrompt || DEFAULT_VOICE_CALL_PROMPT
// 创建系统指令消息
const systemMessage = {
role: 'system',
content: promptToUse
}
// 修改用户消息的内容
userMessage.content = text
// 构建最终消息列表
// 使用类型断言解决类型问题
const messages = [systemMessage, ...historyMessages, userMessage] as Message[]
// 流式响应处理
let fullResponse = ''
try {
// 调用真实的LLM API
await fetchChatCompletion({
message: assistantMessage,
messages,
assistant,
onResponse: async (msg) => {
if (msg.content && msg.content !== fullResponse) {
fullResponse = msg.content
// 更新UI
this.callbacks?.onResponse(fullResponse)
// 如果TTS正在播放停止它
if (this.ttsService.isCurrentlyPlaying()) {
this.ttsService.stop()
}
}
}
})
// 播放完整响应
if (!this.isMuted && this.isCallActive) {
// 手动设置语音状态
this.callbacks?.onSpeakingStateChange(true)
// 添加TTS状态变化事件监听器
const handleTTSStateChange = (event: CustomEvent) => {
const { isPlaying } = event.detail
console.log('语音通话中检测到TTS状态变化:', isPlaying)
this.callbacks?.onSpeakingStateChange(isPlaying)
}
// 添加事件监听器
window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
// 更新助手消息的内容
assistantMessage.content = fullResponse
assistantMessage.status = 'success'
// 使用speakFromMessage方法播放会应用TTS过滤选项
this.ttsService.speakFromMessage(assistantMessage)
// 设置超时安全机制,确保事件监听器被移除
setTimeout(() => {
window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
}, 30000) // 30秒后移除事件监听器
}
// 更新对话历史
this.conversationHistory.push({ role: 'assistant', content: fullResponse })
} catch (innerError) {
console.error('Error generating response:', innerError)
// 如果出错,使用一个简单的回复
fullResponse = `抱歉,处理您的请求时出错了。`
this.callbacks?.onResponse(fullResponse)
if (!this.isMuted && this.isCallActive) {
// 手动设置语音状态
this.callbacks?.onSpeakingStateChange(true)
// 创建一个简单的助手消息对象
const errorMessage = {
id: 'error-message',
role: 'assistant',
content: fullResponse,
status: 'success'
} as Message
// 使用speakFromMessage方法播放会应用TTS过滤选项
this.ttsService.speakFromMessage(errorMessage)
// 确保语音结束后状态正确
setTimeout(() => {
if (this.ttsService && !this.ttsService.isCurrentlyPlaying()) {
this.callbacks?.onSpeakingStateChange(false)
}
}, 1000) // 1秒后检查TTS状态
}
}
} catch (error) {
console.error('Error processing voice response:', error)
} finally {
this.isProcessingResponse = false
// 不自动恢复语音识别,等待用户长按按钮
// 长按说话模式下,我们不需要自动恢复语音识别
}
}
/**
 * Stop recording and send the recognized text to the chat view
 * @returns Promise<boolean> whether the stop was handled
 */
async stopRecordingAndSendToChat(): Promise<boolean> {
if (!this.isCallActive || !this.isRecording) {
return false
}
// 清除录音超时定时器
if (this.recordingTimeout) {
clearTimeout(this.recordingTimeout)
this.recordingTimeout = null
}
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
try {
// 立即设置录音状态为false防止重复处理
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
// 存储当前的语音识别结果用于松开按钮后发送给AI
const currentTranscript = this._currentTranscript
// 存储累积的语音识别结果
const accumulatedTranscript = this._accumulatedTranscript
if (asrServiceType === 'browser') {
// 浏览器ASR
if (!this.recognition) {
throw new Error('Browser speech recognition not initialized')
}
this.recognition.stop()
// 优先使用累积的文本,如果有的话
if (accumulatedTranscript && accumulatedTranscript.trim()) {
console.log('发送累积的语音识别结果给聊天界面:', accumulatedTranscript)
this.handleUserSpeech(accumulatedTranscript, true)
} else if (currentTranscript && currentTranscript.trim()) {
// 如果没有累积结果,使用当前结果
console.log('没有累积结果,使用当前结果发送给聊天界面:', currentTranscript)
this.handleUserSpeech(currentTranscript, true)
} else {
console.log('没有有效的语音识别结果,不发送消息')
window.message.info({ content: '没有收到语音输入', key: 'voice-call-empty' })
}
// 清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
} else if (asrServiceType === 'local') {
// 本地服务器ASR
// 创建一个承诺,等待最终结果
const finalResultPromise = new Promise<string>((resolve) => {
// 设置一个超时器,确保不会无限等待
const timeoutId = setTimeout(() => {
console.log('等待最终结果超时,使用当前结果')
resolve(this._currentTranscript)
}, 1500) // 1.5秒超时
// 设置回调函数来接收最终结果
const resultCallback = (text: string, isFinal?: boolean) => {
// 如果是空字符串,表示只是重置状态,不处理
if (text === '') return
if (text) {
// 只处理最终结果,忽略中间结果
if (isFinal) {
clearTimeout(timeoutId)
console.log('收到最终语音识别结果:', text)
this._currentTranscript = text
this.callbacks?.onTranscript(text)
resolve(text)
} else {
// 对于中间结果只更新显示不解析Promise
console.log('收到中间语音识别结果:', text)
this.callbacks?.onTranscript(text)
}
}
}
// 停止录音,但不取消,以获取最终结果
ASRService.stopRecording(resultCallback)
// 添加额外的安全措施,在停止后立即发送重置命令
setTimeout(() => {
// 发送重置命令,确保浏览器不会继续发送结果
ASRService.cancelRecording()
// 清除ASRService中的回调函数防止后续结果被处理
ASRService.resultCallback = null
}, 2000) // 2秒后强制取消作为安全措施
})
// 等待最终结果但最多等待3秒
const finalText = await finalResultPromise
// 优先使用累积的文本,如果有的话
if (accumulatedTranscript && accumulatedTranscript.trim()) {
console.log('发送累积的语音识别结果给聊天界面:', accumulatedTranscript)
this.handleUserSpeech(accumulatedTranscript, true)
} else if (finalText && finalText.trim()) {
// 如果没有累积结果,使用最终结果
console.log('发送最终语音识别结果给聊天界面:', finalText)
this.handleUserSpeech(finalText, true)
} else if (currentTranscript && currentTranscript.trim()) {
// 如果没有最终结果,使用当前结果
console.log('没有最终结果,使用当前结果发送给聊天界面:', currentTranscript)
this.handleUserSpeech(currentTranscript, true)
} else {
console.log('没有有效的语音识别结果,不发送消息')
window.message.info({ content: '没有收到语音输入', key: 'voice-call-empty' })
}
// 再次确保所有状态被重置
this._currentTranscript = ''
this._accumulatedTranscript = ''
} else if (asrServiceType === 'openai') {
// OpenAI ASR
await ASRService.stopRecording((text) => {
// 更新最终的语音识别结果
if (text) {
this._currentTranscript = text
this.callbacks?.onTranscript(text)
}
})
// 使用最新的语音识别结果
const finalTranscript = this._currentTranscript
if (finalTranscript && finalTranscript.trim()) {
console.log('发送OpenAI语音识别结果给聊天界面:', finalTranscript)
this.handleUserSpeech(finalTranscript, true)
} else {
console.log('没有有效的OpenAI语音识别结果不发送消息')
window.message.info({ content: '没有收到语音输入', key: 'voice-call-empty' })
}
// 清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
}
return true
} catch (error) {
console.error('Failed to stop recording:', error)
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
// 确保在出错时也清除状态
this._currentTranscript = ''
this._accumulatedTranscript = ''
// 强制取消录音
ASRService.cancelRecording()
return false
}
}
/**
 * Cancel recording without sending anything to the AI
 * @returns Promise<boolean> whether the cancel was handled
 */
async cancelRecording(): Promise<boolean> {
if (!this.isCallActive || !this.isRecording) {
return false
}
// 清除录音超时定时器
if (this.recordingTimeout) {
clearTimeout(this.recordingTimeout)
this.recordingTimeout = null
}
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
try {
if (asrServiceType === 'browser') {
// 浏览器ASR
if (!this.recognition) {
throw new Error('Browser speech recognition not initialized')
}
this.recognition.stop()
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
} else if (asrServiceType === 'local') {
// 本地服务器ASR
ASRService.cancelRecording()
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
} else if (asrServiceType === 'openai') {
// OpenAI ASR
ASRService.cancelRecording()
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
}
// 清除当前识别结果
this._currentTranscript = ''
this.callbacks?.onTranscript('')
return true
} catch (error) {
console.error('Failed to cancel recording:', error)
this.isRecording = false
this.callbacks?.onListeningStateChange(false)
return false
}
}
setMuted(muted: boolean) {
this.isMuted = muted
// 如果设置为静音停止当前TTS播放
if (muted && this.ttsService.isCurrentlyPlaying()) {
this.ttsService.stop()
}
}
/**
 * Force-stop TTS playback
 * @returns void
 */
stopTTS(): void {
// 无论是否正在播放都强制停止TTS
this.ttsService.stop()
console.log('强制停止TTS播放')
// 注意不需要手动触发事件因为在TTSService.stop()中已经触发了
}
setPaused(paused: boolean) {
this.isPaused = paused
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
if (paused) {
// 暂停语音识别
if (asrServiceType === 'browser') {
this.recognition?.stop()
} else if (asrServiceType === 'local' || asrServiceType === 'openai') {
ASRService.cancelRecording()
}
// 暂停TTS
if (this.ttsService.isCurrentlyPlaying()) {
this.ttsService.stop()
}
}
// 不自动恢复语音识别,等待用户长按按钮
}
endCall() {
this.isCallActive = false
// 获取当前ASR服务类型
const { asrServiceType } = store.getState().settings
// 停止语音识别
if (asrServiceType === 'browser') {
this.recognition?.stop()
} else if (asrServiceType === 'local' || asrServiceType === 'openai') {
ASRService.cancelRecording()
}
// 停止TTS
if (this.ttsService.isCurrentlyPlaying()) {
this.ttsService.stop()
}
this.callbacks = null
}
}
export const VoiceCallService = new VoiceCallServiceClass()
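
// --- Illustrative lifecycle sketch (assumptions: a UI component owns the
// state setters below; their names are hypothetical) ---
//
// await VoiceCallService.initialize()
// await VoiceCallService.startCall({
//   onTranscript: (text) => setTranscript(text),
//   onResponse: (text) => setResponse(text),
//   onListeningStateChange: (listening) => setListening(listening),
//   onSpeakingStateChange: (speaking) => setSpeaking(speaking)
// })
// // Push-to-talk: hold to record, release to send
// await VoiceCallService.startRecording()
// await VoiceCallService.stopRecording() // answer inside the call window
// // or: await VoiceCallService.stopRecordingAndSendToChat() // answer in the chat view
// VoiceCallService.endCall()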


@@ -0,0 +1,88 @@
/**
 * Audio stream processor
 * Decodes and sequentially hands off chunks of streamed TTS audio data
 */
export class AudioStreamProcessor {
private audioContext: AudioContext | null = null
private audioQueue: Uint8Array[] = []
private isProcessing: boolean = false
// 回调函数
public onAudioBuffer: ((buffer: AudioBuffer) => void) | null = null
/**
 * Initialize the audio context and reset the queue
 */
public async initialize(): Promise<void> {
// 创建音频上下文
this.audioContext = new AudioContext()
this.audioQueue = []
this.isProcessing = false
}
/**
 * Queue an audio chunk for decoding
 * @param chunk Raw audio data chunk
 */
public async processAudioChunk(chunk: Uint8Array): Promise<void> {
if (!this.audioContext) {
throw new Error('AudioStreamProcessor not initialized')
}
// 将数据块添加到队列
this.audioQueue.push(chunk)
// 如果没有正在处理,开始处理
if (!this.isProcessing) {
this.processQueue()
}
}
/**
 * Process queued chunks one at a time
 */
private async processQueue(): Promise<void> {
if (!this.audioContext || this.audioQueue.length === 0) {
this.isProcessing = false
return
}
this.isProcessing = true
// 获取队列中的第一个数据块
const chunk = this.audioQueue.shift()!
try {
// Decode the audio data. Copy the chunk into a standalone ArrayBuffer first:
// chunk.buffer may be a SharedArrayBuffer (which decodeAudioData rejects) or a
// larger buffer of which this chunk is only a byte-offset view.
const arrayBuffer = chunk.slice().buffer as ArrayBuffer
const audioBuffer = await this.audioContext.decodeAudioData(arrayBuffer)
// 调用回调函数
if (this.onAudioBuffer) {
this.onAudioBuffer(audioBuffer)
}
} catch (error) {
console.error('解码音频数据失败:', error)
}
// 继续处理队列中的下一个数据块
this.processQueue()
}
/**
 * Wait for the queue to drain, then release the audio context
 */
public async finish(): Promise<void> {
// 等待队列处理完成
while (this.audioQueue.length > 0) {
await new Promise((resolve) => setTimeout(resolve, 100))
}
// 关闭音频上下文
if (this.audioContext) {
await this.audioContext.close()
this.audioContext = null
}
}
}
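
// --- Illustrative usage sketch: decoding streamed TTS chunks and playing them
// back-to-back. The fetch URL is a placeholder, and note that decodeAudioData
// only succeeds if each chunk is independently decodable audio. ---
//
// const processor = new AudioStreamProcessor()
// await processor.initialize()
// const playbackCtx = new AudioContext()
// processor.onAudioBuffer = (buffer) => {
//   const source = playbackCtx.createBufferSource()
//   source.buffer = buffer
//   source.connect(playbackCtx.destination)
//   source.start()
// }
// const response = await fetch('https://example.com/tts-stream') // placeholder URL
// const reader = response.body!.getReader()
// for (;;) {
//   const { done, value } = await reader.read()
//   if (done) break
//   await processor.processAudioChunk(value)
// }
// await processor.finish()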


@@ -0,0 +1,283 @@
import i18n from '@renderer/i18n'
import { TTSServiceInterface } from './TTSServiceInterface'
// 全局变量来跟踪当前正在播放的语音
let currentUtterance: SpeechSynthesisUtterance | null = null
// 全局变量来跟踪是否正在播放
export let isEdgeTTSPlaying = false
/**
 * Edge TTS service implementation (backed by the browser's Web Speech API)
 */
export class EdgeTTSService implements TTSServiceInterface {
private edgeVoice: string
/**
 * Constructor
 * @param edgeVoice Name of the Edge voice to use
 */
constructor(edgeVoice: string) {
this.edgeVoice = edgeVoice
console.log('初始化EdgeTTSService语音:', edgeVoice)
}
/**
 * Validate parameters
 * @throws if no voice is configured
 */
private validateParams(): void {
if (!this.edgeVoice) {
throw new Error(i18n.t('settings.tts.error.no_edge_voice'))
}
}
/**
 * Play text directly through the Web Speech API
 * @param text Text to speak
 * @returns Whether playback started successfully
 */
private playDirectly(text: string): boolean {
try {
// 验证参数
this.validateParams()
// 使用Web Speech API
if (!('speechSynthesis' in window)) {
throw new Error(i18n.t('settings.tts.error.browser_not_support'))
}
// 停止当前正在播放的语音
window.speechSynthesis.cancel()
if (currentUtterance) {
currentUtterance = null
}
isEdgeTTSPlaying = false
// 创建语音合成器实例
const utterance = new SpeechSynthesisUtterance(text)
currentUtterance = utterance
isEdgeTTSPlaying = true
// 获取可用的语音合成声音
const voices = window.speechSynthesis.getVoices()
console.log('可用的语音合成声音:', voices)
// 查找指定的语音
let selectedVoice = voices.find((v) => v.name === this.edgeVoice)
// 如果没有找到指定的语音,尝试使用中文语音
if (!selectedVoice) {
console.warn('未找到指定的语音:', this.edgeVoice)
// 尝试找中文语音
selectedVoice = voices.find((v) => v.lang === 'zh-CN')
if (selectedVoice) {
console.log('使用替代中文语音:', selectedVoice.name)
} else {
// 如果没有中文语音,使用第一个可用的语音
if (voices.length > 0) {
selectedVoice = voices[0]
console.log('使用第一个可用的语音:', selectedVoice.name)
} else {
console.warn('没有可用的语音')
return false
}
}
} else {
console.log('已选择语音:', selectedVoice.name)
}
// 设置语音
if (selectedVoice) {
utterance.voice = selectedVoice
}
// 设置事件处理程序
utterance.onend = () => {
console.log('语音合成已结束')
currentUtterance = null
isEdgeTTSPlaying = false
// 分发一个自定义事件,通知语音合成已结束
// 这样TTSService可以监听这个事件并重置播放状态
const event = new CustomEvent('edgeTTSComplete', { detail: { text } })
document.dispatchEvent(event)
}
utterance.onerror = (errorEvent) => {
console.error('语音合成错误:', errorEvent)
currentUtterance = null
isEdgeTTSPlaying = false
// 在错误时也触发结束事件,确保状态更新
const completeEvent = new CustomEvent('edgeTTSComplete', { detail: { text, error: true } })
document.dispatchEvent(completeEvent)
}
// 开始语音合成
window.speechSynthesis.speak(utterance)
return true
} catch (error) {
console.error('直接播放语音失败:', error)
return false
}
}
/**
 * Synthesize speech for the given text
 * @param text Text to synthesize
 * @returns Promise resolving to an audio Blob
 */
async synthesize(text: string): Promise<Blob> {
// 验证参数
this.validateParams()
// 先尝试直接播放
const playResult = this.playDirectly(text)
if (playResult) {
// 如果直接播放成功返回一个有效的音频Blob
// 创建一个简单的音频文件,包含一个短暂停
// 这个文件可以被浏览器正常播放,但实际上不会发出声音
// 因为我们已经使用Web Speech API直接播放了语音
const silentAudioBase64 = 'UklGRiQAAABXQVZFZm10IBAAAAABAAEARKwAAIhYAQACABAAZGF0YQAAAAA='
const silentAudioBuffer = Uint8Array.from(atob(silentAudioBase64), (c) => c.charCodeAt(0))
return new Blob([silentAudioBuffer], { type: 'audio/wav' })
}
// 如果直接播放失败,尝试录制方法
console.log('直接播放失败,尝试录制方法')
try {
console.log('使用浏览器TTS生成语音音色:', this.edgeVoice)
// 使用Web Speech API
if (!('speechSynthesis' in window)) {
throw new Error(i18n.t('settings.tts.error.browser_not_support'))
}
// 停止当前正在播放的语音
window.speechSynthesis.cancel()
isEdgeTTSPlaying = false
// 创建语音合成器实例
const utterance = new SpeechSynthesisUtterance(text)
// 获取可用的语音合成声音
const voices = window.speechSynthesis.getVoices()
console.log('初始可用的语音合成声音:', voices)
// 如果没有可用的声音,等待声音加载
if (voices.length === 0) {
try {
await new Promise<void>((resolve) => {
const voicesChangedHandler = () => {
window.speechSynthesis.onvoiceschanged = null
resolve()
}
window.speechSynthesis.onvoiceschanged = voicesChangedHandler
// 设置超时,防止无限等待
setTimeout(() => {
window.speechSynthesis.onvoiceschanged = null
resolve()
}, 5000)
})
} catch (error) {
console.error('等待语音加载超时:', error)
}
}
// 重新获取可用的语音合成声音
const updatedVoices = window.speechSynthesis.getVoices()
console.log('更新后可用的语音合成声音:', updatedVoices)
// 查找指定的语音
let selectedVoice = updatedVoices.find((v) => v.name === this.edgeVoice)
// 如果没有找到指定的语音,尝试使用中文语音
if (!selectedVoice) {
console.warn('未找到指定的语音:', this.edgeVoice)
// 尝试找中文语音
selectedVoice = updatedVoices.find((v) => v.lang === 'zh-CN')
if (selectedVoice) {
console.log('使用替代中文语音:', selectedVoice.name)
} else {
// 如果没有中文语音,使用第一个可用的语音
if (updatedVoices.length > 0) {
selectedVoice = updatedVoices[0]
console.log('使用第一个可用的语音:', selectedVoice.name)
} else {
console.warn('没有可用的语音')
}
}
} else {
console.log('已选择语音:', selectedVoice.name)
}
// 设置语音
if (selectedVoice) {
utterance.voice = selectedVoice
}
// 创建一个Promise来等待语音合成完成
return await new Promise<Blob>((resolve, reject) => {
try {
// 使用AudioContext捕获语音合成的音频
const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)()
const audioDestination = audioContext.createMediaStreamDestination()
const mediaRecorder = new MediaRecorder(audioDestination.stream)
const audioChunks: BlobPart[] = []
mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
audioChunks.push(event.data)
}
}
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunks, { type: 'audio/wav' })
resolve(audioBlob)
}
// 开始录制
mediaRecorder.start()
// 设置语音合成事件
utterance.onend = () => {
// 语音合成结束后停止录制
setTimeout(() => {
mediaRecorder.stop()
}, 500) // 等待一下,确保所有音频都被捕获
}
utterance.onerror = (event) => {
console.error('语音合成错误:', event)
mediaRecorder.stop()
reject(new Error('语音合成错误'))
}
// 开始语音合成
window.speechSynthesis.speak(utterance)
// 设置超时,防止无限等待
setTimeout(() => {
if (mediaRecorder.state === 'recording') {
console.warn('语音合成超时,强制停止')
mediaRecorder.stop()
}
}, 10000) // 10秒超时
} catch (error: any) {
console.error('浏览器TTS语音合成失败:', error)
reject(new Error(`浏览器TTS语音合成失败: ${error?.message || '未知错误'}`))
}
})
} catch (error: any) {
console.error('浏览器TTS语音合成失败:', error)
// 即使失败也返回一个空的Blob而不是抛出异常
// 这样可以避免在UI上显示错误消息
return new Blob([], { type: 'audio/wav' })
}
}
}
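
// --- Illustrative sketch: observing direct playback completion. Because
// playDirectly() bypasses the <audio> element, completion is signalled via the
// 'edgeTTSComplete' DOM event dispatched above. ---
//
// document.addEventListener('edgeTTSComplete', (e) => {
//   const { text, error } = (e as CustomEvent).detail
//   console.log('Edge TTS finished', { textLength: text?.length, failed: !!error })
// })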


@@ -0,0 +1,65 @@
import { TTSServiceInterface } from './TTSServiceInterface'
/**
 * Free online TTS service implementation
 * Uses a free online TTS endpoint; no API key is required
 */
export class MsTTSService implements TTSServiceInterface {
private voice: string
private outputFormat: string
/**
 * Constructor
 * @param voice Voice name
 * @param outputFormat Audio output format
 */
constructor(voice: string, outputFormat: string) {
this.voice = voice
this.outputFormat = outputFormat
console.log('初始化MsTTSService语音:', voice, '输出格式:', outputFormat)
}
/**
 * Validate parameters, falling back to defaults where unset
 */
private validateParams(): void {
if (!this.voice) {
// 如果没有设置音色,使用默认的小晓音色
console.warn('未设置免费在线TTS音色使用默认音色 zh-CN-XiaoxiaoNeural')
this.voice = 'zh-CN-XiaoxiaoNeural'
}
if (!this.outputFormat) {
// 如果没有设置输出格式,使用默认格式
console.warn('未设置免费在线TTS输出格式使用默认格式 audio-24khz-48kbitrate-mono-mp3')
this.outputFormat = 'audio-24khz-48kbitrate-mono-mp3'
}
}
/**
 * Synthesize speech for the given text
 * @param text Text to synthesize
 * @returns Promise resolving to an audio Blob
 */
async synthesize(text: string): Promise<Blob> {
// 验证参数
this.validateParams()
try {
console.log('使用免费在线TTS生成语音音色:', this.voice)
// 通过IPC调用主进程的MsTTSService
const outputPath = await window.api.msTTS.synthesize(text, this.voice, this.outputFormat)
// 读取生成的音频文件
const audioData = await window.api.fs.read(outputPath)
// 将Buffer转换为Blob
return new Blob([audioData], { type: 'audio/mp3' })
} catch (error: any) {
console.error('免费在线TTS语音合成失败:', error)
throw new Error(`免费在线TTS语音合成失败: ${error?.message || '未知错误'}`)
}
}
}


@@ -0,0 +1,93 @@
import i18n from '@renderer/i18n'
import { TTSServiceInterface } from './TTSServiceInterface'
/**
 * OpenAI TTS service implementation
 */
export class OpenAITTSService implements TTSServiceInterface {
private apiKey: string
private apiUrl: string
private voice: string
private model: string
/**
 * Constructor
 * @param apiKey OpenAI API key
 * @param apiUrl OpenAI API endpoint URL
 * @param voice Voice name
 * @param model Model name
 */
constructor(apiKey: string, apiUrl: string, voice: string, model: string) {
this.apiKey = apiKey
this.apiUrl = apiUrl
this.voice = voice
this.model = model
}
/**
 * Validate parameters
 * @throws if any required parameter is missing
 */
private validateParams(): void {
if (!this.apiKey) {
throw new Error(i18n.t('settings.tts.error.no_api_key'))
}
if (!this.apiUrl) {
throw new Error(i18n.t('settings.tts.error.no_api_url'))
}
if (!this.voice) {
throw new Error(i18n.t('settings.tts.error.no_voice'))
}
if (!this.model) {
throw new Error(i18n.t('settings.tts.error.no_model'))
}
}
/**
 * Synthesize speech for the given text
 * @param text Text to synthesize
 * @returns Promise resolving to an audio Blob
 */
async synthesize(text: string): Promise<Blob> {
// 验证参数
this.validateParams()
// 准备OpenAI TTS请求体
const requestBody: any = {
input: text
}
// 只有当模型和音色不为空时才添加到请求体中
if (this.model) {
requestBody.model = this.model
}
if (this.voice) {
requestBody.voice = this.voice
}
// 调用OpenAI TTS API
console.log('调用OpenAI TTS API开始合成语音')
const response = await fetch(this.apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.apiKey}`
},
body: JSON.stringify(requestBody)
})
if (!response.ok) {
const errorData = await response.json()
throw new Error(errorData.error?.message || 'OpenAI语音合成失败')
}
// 获取音频数据
console.log('获取到OpenAI TTS响应开始处理音频数据')
return await response.blob()
}
}


@@ -0,0 +1,117 @@
import i18n from '@renderer/i18n'
import { TTSServiceInterface } from './TTSServiceInterface'
/**
 * Siliconflow TTS service implementation
 */
export class SiliconflowTTSService implements TTSServiceInterface {
private apiKey: string
private apiUrl: string
private voice: string
private model: string
private responseFormat: string
private speed: number
/**
 * Constructor
 * @param apiKey API key
 * @param apiUrl API endpoint URL
 * @param voice Voice name
 * @param model Model name
 * @param responseFormat Audio response format
 * @param speed Playback speed
 */
constructor(
apiKey: string,
apiUrl: string,
voice: string,
model: string,
responseFormat: string = 'mp3',
speed: number = 1.0
) {
this.apiKey = apiKey
this.apiUrl = apiUrl || 'https://api.siliconflow.cn/v1/audio/speech'
this.voice = voice
this.model = model
this.responseFormat = responseFormat
this.speed = speed
}
/**
 * Validate parameters
 * @throws if any required parameter is missing
 */
private validateParams(): void {
if (!this.apiKey) {
throw new Error(i18n.t('settings.tts.error.no_api_key'))
}
if (!this.voice) {
throw new Error(i18n.t('settings.tts.error.no_voice'))
}
if (!this.model) {
throw new Error(i18n.t('settings.tts.error.no_model'))
}
}
/**
 * Synthesize speech for the given text
 * @param text Text to synthesize
 * @returns Promise resolving to an audio Blob
 */
async synthesize(text: string): Promise<Blob> {
// 验证参数
this.validateParams()
// 准备硅基流动TTS请求体
const requestBody: any = {
model: this.model,
input: text,
voice: this.voice,
// 使用配置的响应格式默认为mp3
response_format: this.responseFormat,
stream: false,
speed: this.speed
}
console.log('硅基流动TTS请求参数:', {
model: this.model,
voice: this.voice,
response_format: this.responseFormat,
speed: this.speed
})
// 调用硅基流动TTS API
console.log('调用硅基流动TTS API开始合成语音')
const response = await fetch(this.apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.apiKey}`
},
body: JSON.stringify(requestBody)
})
if (!response.ok) {
let errorMessage = '硅基流动语音合成失败'
try {
const errorData = await response.json()
errorMessage = errorData.error?.message || errorMessage
} catch (e) {
// 如果无法解析JSON使用默认错误消息
}
throw new Error(errorMessage)
}
// 获取音频数据
console.log('获取到硅基流动TTS响应开始处理音频数据')
// 获取原始Blob
const originalBlob = await response.blob()
// 创建一个新的Blob并指定正确的MIME类型
return new Blob([originalBlob], { type: 'audio/mpeg' })
}
}


@@ -0,0 +1,686 @@
import i18n from '@renderer/i18n'
import store from '@renderer/store'
import { setLastPlayedMessageId } from '@renderer/store/settings'
import { Message } from '@renderer/types'
import { TextSegmenter } from './TextSegmenter'
import { TTSServiceFactory } from './TTSServiceFactory'
import { TTSTextFilter } from './TTSTextFilter'
/**
 * TTS service
 * Synthesizes and plays speech, with optional segmented playback
 */
// 音频段落接口
interface AudioSegment {
text: string // 段落文本
audioBlob?: Blob // 对应的音频Blob
audioUrl?: string // 音频URL
isLoaded: boolean // 是否已加载
isLoading: boolean // 是否正在加载
}
export class TTSService {
private static instance: TTSService
private audioElement: HTMLAudioElement | null = null
private isPlaying = false
private playingServiceType: string | null = null
private playingMessageId: string | null = null
private progressUpdateInterval: NodeJS.Timeout | null = null
// 分段播放相关属性
private audioSegments: AudioSegment[] = []
private currentSegmentIndex: number = 0
private isSegmentedPlayback: boolean = false
// 错误消息节流控制
private lastErrorTime = 0
private errorThrottleTime = 2000 // 2秒内不重复显示相同错误
/**
 * Get the shared instance
 * @returns TTSService instance
 */
public static getInstance(): TTSService {
// 每次调用时强制重新创建实例,确保使用最新的设置
// 注意:这会导致每次调用时都创建新的音频元素,可能会有内存泄漏风险
// 但在当前情况下这是解决TTS服务类型切换问题的最简单方法
TTSService.instance = new TTSService()
return TTSService.instance
}
/**
 * Private constructor: creates the hidden audio element and wires up playback events
 */
private constructor() {
// 创建音频元素
this.audioElement = document.createElement('audio')
this.audioElement.style.display = 'none'
document.body.appendChild(this.audioElement)
// 监听音频播放结束事件
this.audioElement.addEventListener('ended', () => {
// 只有在非EdgeTTS服务时才直接更新状态
if (this.playingServiceType !== 'edge') {
this.updatePlayingState(false)
console.log('TTS播放结束 (音频元素事件)')
}
})
// 监听浏览器TTS直接播放结束的自定义事件
document.addEventListener('edgeTTSComplete', () => {
console.log('收到浏览器TTS直接播放结束事件')
this.updatePlayingState(false)
})
// 监听全局的speechSynthesis状态
if ('speechSynthesis' in window) {
// 创建一个定时器定期检查speechSynthesis的状态
setInterval(() => {
// 只有在使用EdgeTTS且标记为正在播放时才检查
if (this.isPlaying && this.playingServiceType === 'edge') {
// 检查是否还在播放
const isSpeaking = window.speechSynthesis.speaking
if (!isSpeaking) {
console.log('检测到speechSynthesis不再播放更新状态')
this.updatePlayingState(false)
}
}
}, 500) // 每500毫秒检查一次
}
}
/**
 * Speak the content of a message
 * @param message Message whose content should be read aloud
 * @param segmented Whether to use segmented playback
 * @returns Whether playback started
 */
public async speakFromMessage(message: Message, segmented: boolean = false): Promise<boolean> {
// 获取最新的TTS过滤选项
const settings = store.getState().settings
const ttsFilterOptions = settings.ttsFilterOptions || {
filterThinkingProcess: true,
filterMarkdown: true,
filterCodeBlocks: true,
filterHtmlTags: true,
filterEmojis: true,
maxTextLength: 4000
}
// 更新最后播放的消息ID
const dispatch = store.dispatch
dispatch(setLastPlayedMessageId(message.id))
console.log('更新最后播放的消息ID:', message.id)
// 记录当前正在播放的消息ID
this.playingMessageId = message.id
// 应用过滤
const filteredText = TTSTextFilter.filterText(message.content, ttsFilterOptions)
console.log('TTS过滤前文本长度:', message.content.length, '过滤后:', filteredText.length)
// 播放过滤后的文本
return this.speak(filteredText, segmented)
}
/**
 * Update the playing state and notify listeners
 * @param isPlaying Whether TTS is now playing
 */
private updatePlayingState(isPlaying: boolean): void {
// 只有状态变化时才更新和触发事件
if (this.isPlaying !== isPlaying) {
this.isPlaying = isPlaying
console.log(`TTS播放状态更新: ${isPlaying ? '开始播放' : '停止播放'}`)
// 触发自定义事件通知其他组件TTS状态变化
const event = new CustomEvent('tts-state-change', { detail: { isPlaying } })
window.dispatchEvent(event)
// 如果开始播放,启动进度更新定时器
if (isPlaying && this.audioElement) {
this.startProgressUpdates()
}
// 如果停止播放,清除服务类型和定时器
if (!isPlaying) {
this.playingServiceType = null
this.stopProgressUpdates()
// 确保Web Speech API也停止
if ('speechSynthesis' in window) {
window.speechSynthesis.cancel()
}
}
}
}
/**
 * Speak the given text
 * @param text Text to read aloud
 * @param segmented Whether to use segmented playback
 * @returns Whether playback started
 */
public async speak(text: string, segmented: boolean = false): Promise<boolean> {
try {
// 检查TTS是否启用
const settings = store.getState().settings
const ttsEnabled = settings.ttsEnabled
if (!ttsEnabled) {
this.showErrorMessage(i18n.t('settings.tts.error.not_enabled'))
return false
}
// 如果正在播放,先停止
if (this.isPlaying) {
this.stop()
// 添加短暂延迟,确保上一个播放完全停止
await new Promise((resolve) => setTimeout(resolve, 100))
}
// 确保文本不为空
if (!text || text.trim() === '') {
this.showErrorMessage(i18n.t('settings.tts.error.empty_text'))
return false
}
// 获取最新的设置
// 强制刷新状态对象,确保获取最新的设置
const latestSettings = store.getState().settings
const serviceType = latestSettings.ttsServiceType || 'openai'
console.log('使用的TTS服务类型:', serviceType)
// 记录当前使用的服务类型
this.playingServiceType = serviceType
// 设置分段播放模式
this.isSegmentedPlayback = segmented
if (segmented) {
// 分段播放模式
return await this.speakSegmented(text, serviceType, latestSettings)
}
console.log('当前TTS设置详情:', {
ttsServiceType: serviceType,
ttsEdgeVoice: latestSettings.ttsEdgeVoice,
ttsSiliconflowApiKey: latestSettings.ttsSiliconflowApiKey ? '已设置' : '未设置',
ttsSiliconflowVoice: latestSettings.ttsSiliconflowVoice,
ttsSiliconflowModel: latestSettings.ttsSiliconflowModel,
ttsSiliconflowResponseFormat: latestSettings.ttsSiliconflowResponseFormat,
ttsSiliconflowSpeed: latestSettings.ttsSiliconflowSpeed
})
try {
// 使用工厂创建TTS服务
const ttsService = TTSServiceFactory.createService(serviceType, latestSettings)
// 合成语音
const audioBlob = await ttsService.synthesize(text)
// 播放音频
if (audioBlob) {
const audioUrl = URL.createObjectURL(audioBlob)
if (this.audioElement) {
// 打印音频Blob信息帮助调试
console.log('音频Blob信息:', {
size: audioBlob.size,
type: audioBlob.type,
serviceType: serviceType
})
this.audioElement.src = audioUrl
this.audioElement.play().catch((error) => {
// 检查是否是浏览器TTS直接播放的情况
// 如果是浏览器TTS且音频大小很小则不显示错误消息
const isEdgeTTS = serviceType === 'edge'
const isSmallBlob = audioBlob.size < 100 // 小于100字节的音频文件可能是我们的静音文件
if (isEdgeTTS && isSmallBlob) {
console.log('浏览器TTS直接播放中忽略音频元素错误')
} else {
console.error('播放TTS音频失败:', error)
console.error('音频URL:', audioUrl)
console.error('音频Blob类型:', audioBlob.type)
console.error('音频Blob大小:', audioBlob.size)
this.showErrorMessage(i18n.t('settings.tts.error.play_failed'))
}
})
// 更新播放状态
this.updatePlayingState(true)
console.log('开始播放TTS音频')
// 释放URL对象
this.audioElement.onended = () => {
URL.revokeObjectURL(audioUrl)
// 检查是否是浏览器TTS直接播放的情况
const isEdgeTTS = serviceType === 'edge'
const isSmallBlob = audioBlob.size < 100
// 对于非EdgeTTS服务直接更新状态
// EdgeTTS服务的状态更新由定时器和edgeTTSComplete事件处理
if (!(isEdgeTTS && isSmallBlob)) {
this.updatePlayingState(false)
}
}
return true
}
}
return false
} catch (error: any) {
console.error('TTS合成失败:', error)
this.showErrorMessage(error?.message || i18n.t('settings.tts.error.synthesis_failed'))
return false
}
} catch (error) {
console.error('TTS播放失败:', error)
this.showErrorMessage(i18n.t('settings.tts.error.general'))
return false
}
}
/**
 * Stop playback
 */
public stop(): void {
// 无论是否正在播放,都强制停止
if (this.audioElement) {
this.audioElement.pause()
this.audioElement.currentTime = 0
console.log('强制停止TTS播放')
}
// 如果是EdgeTTS确保Web Speech API也停止
if ('speechSynthesis' in window) {
window.speechSynthesis.cancel()
console.log('停止Web Speech API播放')
}
// 停止进度更新
this.stopProgressUpdates()
// 更新状态并触发事件
this.updatePlayingState(false)
// 清除正在播放的消息ID
this.playingMessageId = null
// 发送一个最终的进度更新事件,确保进度条消失
this.emitProgressUpdateEvent(0, 0, 0)
// 如果是分段播放模式,不清理资源,以便用户可以从其他段落继续播放
}
/**
 * Whether TTS is currently playing
 * @returns boolean
 */
public isCurrentlyPlaying(): boolean {
return this.isPlaying
}
/**
 * Seek to a position in the current audio
 * @param time Target time in seconds
 * @returns Whether the seek succeeded
 */
public seek(time: number): boolean {
if (!this.audioElement || !this.isPlaying) {
console.log('无法跳转,音频元素不存在或未在播放中')
return false
}
try {
// 确保时间在有效范围内
const duration = this.audioElement.duration || 0
const validTime = Math.max(0, Math.min(time, duration))
console.log(`跳转到时间点: ${validTime.toFixed(2)}秒 / ${duration.toFixed(2)}`)
this.audioElement.currentTime = validTime
return true
} catch (error) {
console.error('跳转失败:', error)
return false
}
}
/**
 * Start playback from a specific segment
 * @param index Index of the segment to start from
 * @returns Whether playback was initiated
 */
public playFromSegment(index: number): boolean {
console.log(`请求从段落 ${index} 开始播放`)
// 如果当前不是分段播放模式,则先将当前消息切换为分段播放模式
if (!this.isSegmentedPlayback) {
console.log('当前不是分段播放模式,无法从指定段落开始播放')
return false
}
if (index < 0 || index >= this.audioSegments.length) {
console.log(`段落索引超出范围: ${index}, 总段落数: ${this.audioSegments.length}`)
return false
}
// 如果正在播放,先停止
if (this.isPlaying) {
console.log('停止当前播放')
this.stop()
}
console.log(`开始播放段落 ${index}: ${this.audioSegments[index].text.substring(0, 20)}...`)
// 开始播放指定段落
return this.playSegment(index)
}
/**
 * Segmented playback: split the text into sentences and load each as a separate audio clip
 * @param text Text to read aloud
 * @param serviceType TTS service type
 * @param settings Current settings
 * @returns Whether preparation succeeded
 */
private async speakSegmented(text: string, serviceType: string, settings: any): Promise<boolean> {
try {
console.log('开始分段播放模式')
// 分割文本为句子
const sentences = TextSegmenter.splitIntoSentences(text)
console.log(`文本分割为 ${sentences.length} 个段落`)
if (sentences.length === 0) {
console.log('没有有效段落,取消播放')
return false
}
// 创建音频段落数组
this.audioSegments = sentences.map((sentence) => ({
text: sentence,
isLoaded: false,
isLoading: false
}))
// 设置分段播放模式
this.isSegmentedPlayback = true
// 重置当前段落索引
this.currentSegmentIndex = 0
// 触发分段播放事件
this.emitSegmentedPlaybackEvent()
// 预加载所有段落,确保完整播放
for (let i = 0; i < sentences.length; i++) {
// 使用setTimeout错开加载时间避免同时发起过多请求
setTimeout(() => {
if (this.isSegmentedPlayback) {
// 确保仍然在分段播放模式
this.loadSegmentAudio(i, serviceType, settings)
}
}, i * 100) // 每100毫秒加载一个段落
}
// 不自动开始播放,等待用户点击
console.log('分段播放模式已准备就绪,等待用户点击')
return true
} catch (error: any) {
console.error('TTS分段播放失败:', error)
this.showErrorMessage(error?.message || i18n.t('settings.tts.error.synthesis_failed'))
return false
}
}
/**
 * Load the audio for one segment
 * @param index Segment index
 * @param serviceType TTS service type
 * @param settings Current settings
 */
private async loadSegmentAudio(index: number, serviceType: string, settings: any): Promise<void> {
if (index < 0 || index >= this.audioSegments.length) {
return
}
const segment = this.audioSegments[index]
// 如果已加载或正在加载,则跳过
if (segment.isLoaded || segment.isLoading) {
return
}
// 标记为正在加载
segment.isLoading = true
this.emitSegmentedPlaybackEvent()
try {
// 创建TTS服务
const ttsService = TTSServiceFactory.createService(serviceType, settings)
// 合成音频
const audioBlob = await ttsService.synthesize(segment.text)
// 创建音频URL
const audioUrl = URL.createObjectURL(audioBlob)
// 更新段落信息
segment.audioBlob = audioBlob
segment.audioUrl = audioUrl
segment.isLoaded = true
segment.isLoading = false
// 触发事件
this.emitSegmentedPlaybackEvent()
// 如果是当前播放的段落,且尚未开始播放,则开始播放
if (index === this.currentSegmentIndex && this.isSegmentedPlayback && !this.isPlaying) {
this.playSegment(index)
}
} catch (error) {
console.error(`加载段落音频失败 (索引: ${index}):`, error)
segment.isLoading = false
this.emitSegmentedPlaybackEvent()
}
}
/**
 * Play one segment, chaining to the next when it ends
 * @param index Segment index
 * @returns Whether playback was initiated
 */
private playSegment(index: number): boolean {
if (index < 0 || index >= this.audioSegments.length) {
return false
}
const segment = this.audioSegments[index]
// 如果段落尚未加载完成,则等待加载
if (!segment.isLoaded) {
// 如果尚未开始加载,则开始加载
if (!segment.isLoading) {
const settings = store.getState().settings
const serviceType = settings.ttsServiceType || 'openai'
this.loadSegmentAudio(index, serviceType, settings)
}
return true // 返回true表示已开始处理但尚未实际播放
}
// 更新当前段落索引
this.currentSegmentIndex = index
// 触发事件
this.emitSegmentedPlaybackEvent()
// 播放音频
if (this.audioElement && segment.audioUrl) {
this.audioElement.src = segment.audioUrl
this.audioElement.play().catch((error) => {
console.error('播放段落音频失败:', error)
})
// 更新播放状态
this.updatePlayingState(true)
// 设置音频结束事件
this.audioElement.onended = () => {
// 播放下一个段落
if (index < this.audioSegments.length - 1) {
this.playSegment(index + 1)
} else {
// 所有段落播放完毕
this.updatePlayingState(false)
// 清理资源
this.cleanupSegmentedPlayback()
}
}
return true
}
return false
}
/**
 * Release segment resources after segmented playback finishes
 */
private cleanupSegmentedPlayback(): void {
// 释放所有音频URL
for (const segment of this.audioSegments) {
if (segment.audioUrl) {
URL.revokeObjectURL(segment.audioUrl)
}
}
// 重置状态
this.audioSegments = []
this.currentSegmentIndex = 0
this.isSegmentedPlayback = false
// 触发事件
this.emitSegmentedPlaybackEvent()
console.log('分段播放已完成,资源已清理')
}
/**
 * Start the playback progress update timer
 */
private startProgressUpdates(): void {
// 先停止现有的定时器
this.stopProgressUpdates()
// 确保音频元素存在
if (!this.audioElement) return
// 创建新的定时器每100毫秒更新一次进度
this.progressUpdateInterval = setInterval(() => {
if (this.audioElement && this.isPlaying) {
const currentTime = this.audioElement.currentTime
const duration = this.audioElement.duration || 0
// 计算进度百分比
const progress = duration > 0 ? (currentTime / duration) * 100 : 0
// 触发进度更新事件
this.emitProgressUpdateEvent(currentTime, duration, progress)
}
}, 250) // 将更新频率从100ms降低到250ms减少日志输出
}
/**
 * Stop the playback progress update timer
 */
private stopProgressUpdates(): void {
if (this.progressUpdateInterval) {
clearInterval(this.progressUpdateInterval)
this.progressUpdateInterval = null
}
}
/**
 * Emit a playback progress update event
 * @param currentTime Current playback time in seconds
 * @param duration Total duration in seconds
 * @param progress Progress percentage (0-100)
 */
// Throttling fields for progress logging - logging is currently disabled
// private lastLoggedProgress: number = -1;
// private lastLogTime: number = 0;
private emitProgressUpdateEvent(currentTime: number, duration: number, progress: number): void {
// Build the event payload
const eventData = {
messageId: this.playingMessageId,
isPlaying: this.isPlaying,
currentTime,
duration,
progress
}
// Progress-update logging is fully disabled
// const now = Date.now();
// const currentProgressTens = Math.floor(progress / 10);
// if ((now - this.lastLogTime >= 500) && // time-based throttling
// (currentProgressTens !== Math.floor(this.lastLoggedProgress / 10) ||
// progress === 0 || progress >= 100)) {
// console.log('Emitting TTS progress update event:', {
// messageId: this.playingMessageId ? this.playingMessageId.substring(0, 8) : null,
// progress: Math.round(progress),
// currentTime: Math.round(currentTime),
// duration: Math.round(duration)
// });
// this.lastLoggedProgress = progress;
// this.lastLogTime = now;
// }
// Emit the event
window.dispatchEvent(new CustomEvent('tts-progress-update', { detail: eventData }))
}
/**
* Emit a segmented-playback state update event.
*/
private emitSegmentedPlaybackEvent(): void {
// Build the event payload
const eventData = {
isSegmentedPlayback: this.isSegmentedPlayback,
segments: this.audioSegments.map((segment) => ({
text: segment.text,
isLoaded: segment.isLoaded,
isLoading: segment.isLoading
})),
currentSegmentIndex: this.currentSegmentIndex,
isPlaying: this.isPlaying
}
// Emit the event
window.dispatchEvent(new CustomEvent('tts-segmented-playback-update', { detail: eventData }))
}
/**
* Show an error message, throttled to avoid repeats.
* @param message Error message text
*/
private showErrorMessage(message: string): void {
const now = Date.now()
// If less than the throttle window has passed since the last error message, skip it
if (now - this.lastErrorTime < this.errorThrottleTime) {
console.log('Error message throttled:', message)
return
}
// Update the last error message timestamp
this.lastErrorTime = now
window.message.error({ content: message, key: 'tts-error' })
}
}
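A minimal renderer-side listener sketch for the two custom events emitted above. The payload fields mirror the eventData objects constructed in this class; the handler bodies are illustrative only:

window.addEventListener('tts-progress-update', (event) => {
  const { messageId, isPlaying, currentTime, duration, progress } = (event as CustomEvent).detail
  // e.g. drive a progress bar component
  console.log(`TTS ${messageId}: ${Math.round(progress)}%, playing=${isPlaying} (${currentTime}/${duration}s)`)
})

window.addEventListener('tts-segmented-playback-update', (event) => {
  const { segments, currentSegmentIndex, isPlaying } = (event as CustomEvent).detail
  console.log(`Segment ${currentSegmentIndex + 1}/${segments.length}, playing=${isPlaying}`)
})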

View File

@ -0,0 +1,66 @@
import i18n from '@renderer/i18n'
import { EdgeTTSService } from './EdgeTTSService'
import { MsTTSService } from './MsTTSService'
import { OpenAITTSService } from './OpenAITTSService'
import { SiliconflowTTSService } from './SiliconflowTTSService'
import { TTSServiceInterface } from './TTSServiceInterface'
/**
* TTS service factory class.
* Creates TTS service instances for the configured service type.
*/
export class TTSServiceFactory {
/**
* Create a TTS service instance.
* @param serviceType TTS service type
* @param settings Application settings
* @returns A TTS service instance
*/
static createService(serviceType: string, settings: any): TTSServiceInterface {
console.log('Creating TTS service instance, type:', serviceType)
switch (serviceType) {
case 'openai':
console.log('Creating OpenAI TTS service instance')
return new OpenAITTSService(settings.ttsApiKey, settings.ttsApiUrl, settings.ttsVoice, settings.ttsModel)
case 'edge':
console.log('Creating Edge TTS service instance')
return new EdgeTTSService(settings.ttsEdgeVoice)
case 'siliconflow':
console.log('Creating Siliconflow TTS service instance')
console.log('Siliconflow TTS settings:', {
apiKey: settings.ttsSiliconflowApiKey ? 'set' : 'not set',
apiUrl: settings.ttsSiliconflowApiUrl,
voice: settings.ttsSiliconflowVoice,
model: settings.ttsSiliconflowModel,
responseFormat: settings.ttsSiliconflowResponseFormat,
speed: settings.ttsSiliconflowSpeed
})
return new SiliconflowTTSService(
settings.ttsSiliconflowApiKey,
settings.ttsSiliconflowApiUrl,
settings.ttsSiliconflowVoice,
settings.ttsSiliconflowModel,
settings.ttsSiliconflowResponseFormat,
settings.ttsSiliconflowSpeed
)
case 'mstts': {
console.log('Creating free online TTS service instance')
// Make sure the voice has a default value
const msVoice = settings.ttsMsVoice || 'zh-CN-XiaoxiaoNeural'
const msOutputFormat = settings.ttsMsOutputFormat || 'audio-24khz-48kbitrate-mono-mp3'
console.log('Free online TTS settings:', {
voice: msVoice,
outputFormat: msOutputFormat
})
return new MsTTSService(msVoice, msOutputFormat)
} // Close block scope
default:
throw new Error(i18n.t('settings.tts.error.unsupported_service_type', { serviceType }))
}
}
}
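A minimal usage sketch for the factory, assuming it is called from renderer code with the Redux settings object; the store import path and the throwaway Audio playback are illustrative:

import store from '@renderer/store' // assumed path
import { TTSServiceFactory } from './TTSServiceFactory'

async function speak(text: string): Promise<void> {
  const settings = store.getState().settings
  const serviceType = settings.ttsServiceType || 'openai'
  // Create the configured service and synthesize the text to an audio Blob
  const service = TTSServiceFactory.createService(serviceType, settings)
  const blob = await service.synthesize(text)
  // Play the result and release the object URL afterwards
  const url = URL.createObjectURL(blob)
  const audio = new Audio(url)
  audio.onended = () => URL.revokeObjectURL(url)
  await audio.play()
}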

View File

@ -0,0 +1,29 @@
/**
* TTS service interface.
* Every TTS service implementation must implement this interface.
*/
export interface TTSServiceInterface {
/**
* Synthesize speech from text.
* @param text Text to synthesize
* @returns Promise resolving to an audio Blob
*/
synthesize(text: string): Promise<Blob>
/**
* Synthesize speech as a stream (optional).
* @param text Text to synthesize
* @param onStart Called when streaming starts
* @param onData Called for each decoded audio chunk
* @param onEnd Called when streaming ends
* @param onError Called when an error occurs
* @returns Promise resolving to a synthesis session ID
*/
synthesizeStream?(
text: string,
onStart: () => void,
onData: (audioChunk: AudioBuffer) => void,
onEnd: () => void,
onError: (error: Error) => void
): Promise<string>
}
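A minimal sketch of a conforming implementation: a hypothetical service that POSTs text to an HTTP endpoint returning audio bytes (the class name, request body shape, and endpoint are assumptions, not part of this commit):

import { TTSServiceInterface } from './TTSServiceInterface'

class HttpTTSService implements TTSServiceInterface {
  constructor(
    private apiUrl: string, // assumed endpoint that returns audio bytes
    private voice: string
  ) {}

  async synthesize(text: string): Promise<Blob> {
    const response = await fetch(this.apiUrl, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ input: text, voice: this.voice }) // assumed body shape
    })
    if (!response.ok) {
      throw new Error(`TTS request failed: ${response.status}`)
    }
    return response.blob()
  }
}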

View File

@ -0,0 +1,168 @@
/**
* TTS text filter utility class.
* Filters out content that should not be read aloud by TTS.
*/
export class TTSTextFilter {
/**
* Filter text according to the given options.
* @param text The original text
* @param options Filter options
* @returns The filtered text
*/
public static filterText(
text: string,
options: {
filterThinkingProcess: boolean
filterMarkdown: boolean
filterCodeBlocks: boolean
filterHtmlTags: boolean
filterEmojis: boolean
maxTextLength: number
}
): string {
if (!text) return ''
let filteredText = text
// Filter out the thinking process
if (options.filterThinkingProcess) {
filteredText = this.filterThinkingProcess(filteredText)
}
// Filter Markdown markup
if (options.filterMarkdown) {
filteredText = this.filterMarkdown(filteredText)
}
// Filter code blocks
if (options.filterCodeBlocks) {
filteredText = this.filterCodeBlocks(filteredText)
}
// Filter HTML tags
if (options.filterHtmlTags) {
filteredText = this.filterHtmlTags(filteredText)
}
// Filter emoji
if (options.filterEmojis) {
filteredText = this.filterEmojis(filteredText)
}
// Limit the text length
if (options.maxTextLength > 0 && filteredText.length > options.maxTextLength) {
filteredText = filteredText.substring(0, options.maxTextLength)
}
return filteredText.trim()
}
/**
* Filter out the model's thinking process.
* @param text The original text
* @returns The filtered text
*/
private static filterThinkingProcess(text: string): string {
// Strip <think> tag content
text = text.replace(/<think>[\s\S]*?<\/think>/g, '')
// Strip an unclosed <think> tag and everything after it
if (text.includes('<think>')) {
const parts = text.split('<think>')
text = parts[0]
}
// Strip a thinking-process section (###Thinking / ###Response format)
const thinkingMatch = text.match(/###\s*Thinking[\s\S]*?(?=###\s*Response|$)/)
if (thinkingMatch) {
text = text.replace(thinkingMatch[0], '')
}
// If there is a Response section, keep only the Response section
const responseMatch = text.match(/###\s*Response\s*([\s\S]*?)(?=###|$)/)
if (responseMatch) {
text = responseMatch[1]
}
return text
}
/**
* Filter Markdown markup.
* @param text The original text
* @returns The filtered text
*/
private static filterMarkdown(text: string): string {
// Strip heading markers
text = text.replace(/#{1,6}\s+/g, '')
// Strip bold and italic markers
text = text.replace(/(\*\*|__)(.*?)\1/g, '$2')
text = text.replace(/(\*|_)(.*?)\1/g, '$2')
// Strip links
text = text.replace(/\[([^\]]+)\]\(([^)]+)\)/g, '$1')
// Strip images
text = text.replace(/!\[([^\]]*)\]\(([^)]+)\)/g, '')
// Strip blockquote markers
text = text.replace(/^\s*>\s+/gm, '')
// Strip horizontal rules
text = text.replace(/^\s*[-*_]{3,}\s*$/gm, '')
// Strip list markers
text = text.replace(/^\s*[-*+]\s+/gm, '')
text = text.replace(/^\s*\d+\.\s+/gm, '')
return text
}
/**
* Filter code blocks.
* @param text The original text
* @returns The filtered text
*/
private static filterCodeBlocks(text: string): string {
// Strip fenced code blocks
text = text.replace(/```[\s\S]*?```/g, '')
// Strip indented code blocks
text = text.replace(/(?:^|\n)( {4}|\t).*(?:\n|$)/g, '\n')
// Strip inline code
text = text.replace(/`([^`]+)`/g, '$1')
return text
}
/**
* Filter HTML tags.
* @param text The original text
* @returns The filtered text
*/
private static filterHtmlTags(text: string): string {
// Strip HTML tags
text = text.replace(/<[^>]*>/g, '')
// Strip HTML entities
text = text.replace(/&[a-zA-Z0-9#]+;/g, ' ')
return text
}
/**
* Filter emoji characters.
* @param text The original text
* @returns The filtered text
*/
private static filterEmojis(text: string): string {
// Strip Unicode emoji
// This regex matches most common emoji ranges
return text.replace(
/[\u{1F300}-\u{1F5FF}\u{1F600}-\u{1F64F}\u{1F680}-\u{1F6FF}\u{1F700}-\u{1F77F}\u{1F780}-\u{1F7FF}\u{1F800}-\u{1F8FF}\u{1F900}-\u{1F9FF}\u{1FA00}-\u{1FA6F}\u{1FA70}-\u{1FAFF}\u{2600}-\u{26FF}\u{2700}-\u{27BF}]/gu,
''
)
}
}
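A usage sketch, assuming the ttsFilterOptions object from the settings slice below is passed in (the store import path is an assumption):

import store from '@renderer/store' // assumed path
import { TTSTextFilter } from './TTSTextFilter'

const { ttsFilterOptions } = store.getState().settings
const readable = TTSTextFilter.filterText(
  '### Thinking\nhidden reasoning\n### Response\n**Hello** `world`! 😀',
  ttsFilterOptions
)
// With the default options this yields: "Hello world!"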

View File

@ -0,0 +1,85 @@
/**
* Text segmenter.
* Splits text into sentence-level segments for streaming TTS playback.
*/
export class TextSegmenter {
/**
* Split text into sentences.
* @param text The text to split
* @returns An array of sentence segments
*/
public static splitIntoSentences(text: string): string[] {
if (!text || text.trim() === '') {
return []
}
// Split primarily on sentence-level punctuation to keep playback fluent
// Periods, question marks, exclamation marks, semicolons and colons are the main split points
const punctuationRegex = /([.;:?!。;:?!]+)/g
// Split the text
const parts = text.split(punctuationRegex)
const segments: string[] = []
// Re-attach each punctuation run to the text before it
for (let i = 0; i < parts.length - 1; i += 2) {
const content = parts[i]
const punctuation = parts[i + 1] || ''
if (content.trim() || punctuation.trim()) {
segments.push((content.trim() + punctuation.trim()).trim())
}
}
// Handle the trailing part, if any
if (parts.length % 2 !== 0 && parts[parts.length - 1].trim()) {
segments.push(parts[parts.length - 1].trim())
}
// Further handle whitespace and line breaks
const result = segments
.filter((segment) => segment.trim().length > 0) // Drop empty fragments
.flatMap((segment) => {
// If a fragment is too long, split it further on line breaks
if (segment.length > 100) {
const subParts = segment.split(/([\n\r]+)/)
const subSegments: string[] = []
for (let i = 0; i < subParts.length; i++) {
const part = subParts[i].trim()
if (part) {
subSegments.push(part)
}
}
return subSegments.length > 0 ? subSegments : [segment]
}
return [segment]
})
// Merge fragments that are too short, to keep playback fluent
const mergedResult: string[] = []
let currentSegment = ''
for (const segment of result) {
// If the current fragment plus the new one stays under 100 characters, merge them
if (currentSegment && currentSegment.length + segment.length < 100) {
currentSegment += ' ' + segment
} else {
// If the current fragment is non-empty, push it to the result
if (currentSegment) {
mergedResult.push(currentSegment)
}
currentSegment = segment
}
}
// Push the final fragment
if (currentSegment) {
mergedResult.push(currentSegment)
}
// If splitting produced nothing, return the original text as a single sentence
return mergedResult.length > 0 ? mergedResult : [text]
}
}
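A usage sketch. Note that consecutive short sentences are merged until the combined length approaches 100 characters, so they come back as a single segment:

import { TextSegmenter } from './TextSegmenter'

const longSentence = 'A'.repeat(120) + '.'
const segments = TextSegmenter.splitIntoSentences(`Short one. Short two. ${longSentence} The end.`)
// => ['Short one. Short two.', 'AAA...A.', 'The end.']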

View File

@ -0,0 +1,7 @@
export * from './EdgeTTSService'
export * from './MsTTSService'
export * from './OpenAITTSService'
export * from './SiliconflowTTSService'
export * from './TTSService'
export * from './TTSServiceFactory'
export * from './TTSServiceInterface'

View File

@ -1,6 +1,6 @@
import { createSlice, PayloadAction } from '@reduxjs/toolkit'
import { TRANSLATE_PROMPT } from '@renderer/config/prompts'
import { CodeStyleVarious, LanguageVarious, ThemeMode, TranslateLanguageVarious } from '@renderer/types'
import { CodeStyleVarious, LanguageVarious, Model, ThemeMode, TranslateLanguageVarious } from '@renderer/types'
import { IpcChannel } from '@shared/IpcChannel'
import { WebDAVSyncState } from './backup'
@ -113,8 +113,56 @@ export interface SettingsState {
showOpenedMinappsInSidebar: boolean
// Privacy settings
enableDataCollection: boolean
// TTS configuration
ttsEnabled: boolean
ttsServiceType: string // TTS service type: openai, edge, siliconflow, or mstts
ttsApiKey: string
ttsApiUrl: string
ttsVoice: string
ttsModel: string
ttsCustomVoices: string[]
ttsCustomModels: string[]
showTTSProgressBar: boolean // Whether to show the TTS progress bar
// Browser (Edge) TTS configuration
ttsEdgeVoice: string
// Siliconflow TTS configuration
ttsSiliconflowApiKey: string
ttsSiliconflowApiUrl: string
ttsSiliconflowVoice: string
ttsSiliconflowModel: string
ttsSiliconflowResponseFormat: string
ttsSiliconflowSpeed: number
// Free online TTS configuration
ttsMsVoice: string
ttsMsOutputFormat: string
// TTS filter options
ttsFilterOptions: {
filterThinkingProcess: boolean // Filter out the thinking process
filterMarkdown: boolean // Filter Markdown markup
filterCodeBlocks: boolean // Filter code blocks
filterHtmlTags: boolean // Filter HTML tags
filterEmojis: boolean // Filter emoji
maxTextLength: number // Maximum text length
}
// ASR configuration (speech recognition)
asrEnabled: boolean
asrServiceType: string // ASR service type: openai or browser
asrApiKey: string
asrApiUrl: string
asrModel: string
asrAutoStartServer: boolean // Start the ASR server automatically on app launch
asrLanguage: string // Speech recognition language
// Voice call configuration
voiceCallEnabled: boolean
voiceCallModel: Model | null
voiceCallPrompt: string | null // Custom prompt for voice calls
isVoiceCallActive: boolean // Whether the voice call window is active
lastPlayedMessageId: string | null // ID of the last played message
skipNextAutoTTS: boolean // Whether to skip the next automatic TTS playback
// Quick Panel Triggers
enableQuickPanelTriggers: boolean
enableBackspaceDeleteModel: boolean
// Export Menu Options
exportMenuOptions: {
image: boolean
markdown: boolean
@ -214,8 +262,54 @@ export const initialState: SettingsState = {
maxKeepAliveMinapps: 3,
showOpenedMinappsInSidebar: true,
enableDataCollection: false,
enableQuickPanelTriggers: false,
// TTS configuration
ttsEnabled: false,
ttsServiceType: 'openai', // Default to OpenAI TTS
ttsApiKey: '',
ttsApiUrl: 'https://api.openai.com/v1/audio/speech',
ttsVoice: '',
ttsModel: '',
ttsCustomVoices: [],
ttsCustomModels: [],
showTTSProgressBar: true, // Show the TTS progress bar by default
// Edge TTS configuration
ttsEdgeVoice: 'zh-CN-XiaoxiaoNeural', // Default to the Xiaoxiao voice
// Siliconflow TTS configuration
ttsSiliconflowApiKey: '',
ttsSiliconflowApiUrl: 'https://api.siliconflow.cn/v1/audio/speech',
ttsSiliconflowVoice: 'FunAudioLLM/CosyVoice2-0.5B:alex',
ttsSiliconflowModel: 'FunAudioLLM/CosyVoice2-0.5B',
ttsSiliconflowResponseFormat: 'mp3',
ttsSiliconflowSpeed: 1.0,
// Free online TTS configuration
ttsMsVoice: 'zh-CN-XiaoxiaoNeural',
ttsMsOutputFormat: 'audio-24khz-48kbitrate-mono-mp3',
ttsFilterOptions: {
filterThinkingProcess: true, // Filter the thinking process by default
filterMarkdown: true, // Filter Markdown markup by default
filterCodeBlocks: true, // Filter code blocks by default
filterHtmlTags: true, // Filter HTML tags by default
filterEmojis: true, // Filter emoji by default
maxTextLength: 4000 // Default maximum text length
},
// ASR configuration (speech recognition)
asrEnabled: false,
asrServiceType: 'openai', // Default to OpenAI ASR
asrApiKey: '',
asrApiUrl: 'https://api.openai.com/v1/audio/transcriptions',
asrModel: 'whisper-1',
asrAutoStartServer: false, // Do not auto-start the ASR server by default
asrLanguage: 'zh-CN', // Default to Chinese
// Voice call configuration
voiceCallEnabled: true,
voiceCallModel: null,
voiceCallPrompt: null, // null means use the default prompt
isVoiceCallActive: false, // Whether the voice call window is active
lastPlayedMessageId: null, // ID of the last played message
skipNextAutoTTS: false, // Whether to skip the next automatic TTS playback
enableQuickPanelTriggers: false, // Quick Panel Triggers
enableBackspaceDeleteModel: true,
// Export Menu Options
exportMenuOptions: {
image: true,
markdown: true,
@ -486,12 +580,195 @@ const settingsSlice = createSlice({
setEnableDataCollection: (state, action: PayloadAction<boolean>) => {
state.enableDataCollection = action.payload
},
setExportMenuOptions: (state, action: PayloadAction<typeof initialState.exportMenuOptions>) => {
state.exportMenuOptions = action.payload
// TTS-related actions
setTtsEnabled: (state, action: PayloadAction<boolean>) => {
state.ttsEnabled = action.payload
},
setTtsServiceType: (state, action: PayloadAction<string>) => {
state.ttsServiceType = action.payload
},
setTtsApiKey: (state, action: PayloadAction<string>) => {
state.ttsApiKey = action.payload
},
setTtsApiUrl: (state, action: PayloadAction<string>) => {
state.ttsApiUrl = action.payload
},
setTtsEdgeVoice: (state, action: PayloadAction<string>) => {
state.ttsEdgeVoice = action.payload
},
// Siliconflow TTS actions
setTtsSiliconflowApiKey: (state, action: PayloadAction<string>) => {
state.ttsSiliconflowApiKey = action.payload
},
setTtsSiliconflowApiUrl: (state, action: PayloadAction<string>) => {
state.ttsSiliconflowApiUrl = action.payload
},
setTtsSiliconflowVoice: (state, action: PayloadAction<string>) => {
state.ttsSiliconflowVoice = action.payload
},
setTtsSiliconflowModel: (state, action: PayloadAction<string>) => {
state.ttsSiliconflowModel = action.payload
},
setTtsSiliconflowResponseFormat: (state, action: PayloadAction<string>) => {
state.ttsSiliconflowResponseFormat = action.payload
},
setTtsSiliconflowSpeed: (state, action: PayloadAction<number>) => {
state.ttsSiliconflowSpeed = action.payload
},
// Free online TTS actions
setTtsMsVoice: (state, action: PayloadAction<string>) => {
state.ttsMsVoice = action.payload
},
setTtsMsOutputFormat: (state, action: PayloadAction<string>) => {
state.ttsMsOutputFormat = action.payload
},
setTtsVoice: (state, action: PayloadAction<string>) => {
state.ttsVoice = action.payload
},
setTtsModel: (state, action: PayloadAction<string>) => {
state.ttsModel = action.payload
},
setTtsCustomVoices: (state, action: PayloadAction<string[]>) => {
// Make sure every value is a string
state.ttsCustomVoices = action.payload
.filter((voice) => voice !== null && voice !== undefined)
.map((voice) => (typeof voice === 'string' ? voice : String(voice)))
},
setTtsCustomModels: (state, action: PayloadAction<string[]>) => {
// Make sure every value is a string
state.ttsCustomModels = action.payload
.filter((model) => model !== null && model !== undefined)
.map((model) => (typeof model === 'string' ? model : String(model)))
},
resetTtsCustomValues: (state) => {
// Reset all custom voices and models
state.ttsCustomVoices = []
state.ttsCustomModels = []
},
addTtsCustomVoice: (state, action: PayloadAction<string>) => {
// Make sure the value being added is a string
const voiceStr = typeof action.payload === 'string' ? action.payload : String(action.payload)
// Check whether the same voice already exists
const exists = state.ttsCustomVoices.some((voice) => {
if (typeof voice === 'string') {
return voice === voiceStr
}
return String(voice) === voiceStr
})
if (!exists) {
state.ttsCustomVoices.push(voiceStr)
}
},
addTtsCustomModel: (state, action: PayloadAction<string>) => {
// Make sure the value being added is a string
const modelStr = typeof action.payload === 'string' ? action.payload : String(action.payload)
// Check whether the same model already exists
const exists = state.ttsCustomModels.some((model) => {
if (typeof model === 'string') {
return model === modelStr
}
return String(model) === modelStr
})
if (!exists) {
state.ttsCustomModels.push(modelStr)
}
},
removeTtsCustomVoice: (state, action: PayloadAction<string>) => {
// Make sure the value being removed is a string
const voiceStr = typeof action.payload === 'string' ? action.payload : String(action.payload)
// Filter out the voice to be removed
state.ttsCustomVoices = state.ttsCustomVoices.filter((voice) => {
if (typeof voice === 'string') {
return voice !== voiceStr
}
return String(voice) !== voiceStr
})
},
removeTtsCustomModel: (state, action: PayloadAction<string>) => {
// Make sure the value being removed is a string
const modelStr = typeof action.payload === 'string' ? action.payload : String(action.payload)
// Filter out the model to be removed
state.ttsCustomModels = state.ttsCustomModels.filter((model) => {
if (typeof model === 'string') {
return model !== modelStr
}
return String(model) !== modelStr
})
},
// TTS filter options action
setTtsFilterOptions: (
state,
action: PayloadAction<{
filterThinkingProcess?: boolean
filterMarkdown?: boolean
filterCodeBlocks?: boolean
filterHtmlTags?: boolean
filterEmojis?: boolean
maxTextLength?: number
}>
) => {
state.ttsFilterOptions = {
...state.ttsFilterOptions,
...action.payload
}
},
// Set whether to show the TTS progress bar
setShowTTSProgressBar: (state, action: PayloadAction<boolean>) => {
state.showTTSProgressBar = action.payload
},
// ASR-related actions
setAsrEnabled: (state, action: PayloadAction<boolean>) => {
state.asrEnabled = action.payload
},
setAsrServiceType: (state, action: PayloadAction<string>) => {
state.asrServiceType = action.payload
},
setAsrApiKey: (state, action: PayloadAction<string>) => {
state.asrApiKey = action.payload
},
setAsrApiUrl: (state, action: PayloadAction<string>) => {
state.asrApiUrl = action.payload
},
setAsrModel: (state, action: PayloadAction<string>) => {
state.asrModel = action.payload
},
setAsrAutoStartServer: (state, action: PayloadAction<boolean>) => {
state.asrAutoStartServer = action.payload
},
setAsrLanguage: (state, action: PayloadAction<string>) => {
state.asrLanguage = action.payload
},
setVoiceCallEnabled: (state, action: PayloadAction<boolean>) => {
state.voiceCallEnabled = action.payload
},
setVoiceCallModel: (state, action: PayloadAction<Model | null>) => {
state.voiceCallModel = action.payload
},
setVoiceCallPrompt: (state, action: PayloadAction<string | null>) => {
state.voiceCallPrompt = action.payload
},
setIsVoiceCallActive: (state, action: PayloadAction<boolean>) => {
state.isVoiceCallActive = action.payload
},
setLastPlayedMessageId: (state, action: PayloadAction<string | null>) => {
state.lastPlayedMessageId = action.payload
},
setSkipNextAutoTTS: (state, action: PayloadAction<boolean>) => {
state.skipNextAutoTTS = action.payload
},
// Quick Panel Triggers action
setEnableQuickPanelTriggers: (state, action: PayloadAction<boolean>) => {
state.enableQuickPanelTriggers = action.payload
},
setExportMenuOptions: (state, action: PayloadAction<typeof initialState.exportMenuOptions>) => {
state.exportMenuOptions = action.payload
},
setEnableBackspaceDeleteModel: (state, action: PayloadAction<boolean>) => {
state.enableBackspaceDeleteModel = action.payload
}
@ -583,6 +860,43 @@ export const {
setEnableDataCollection,
setEnableQuickPanelTriggers,
setExportMenuOptions,
setTtsEnabled,
setTtsServiceType,
setTtsApiKey,
setTtsApiUrl,
setTtsEdgeVoice,
setTtsSiliconflowApiKey,
setTtsSiliconflowApiUrl,
setTtsSiliconflowVoice,
setTtsSiliconflowModel,
setTtsSiliconflowResponseFormat,
setTtsSiliconflowSpeed,
setTtsMsVoice,
setTtsMsOutputFormat,
setTtsVoice,
setTtsModel,
setTtsCustomVoices,
setTtsCustomModels,
resetTtsCustomValues,
addTtsCustomVoice,
addTtsCustomModel,
removeTtsCustomVoice,
removeTtsCustomModel,
setTtsFilterOptions,
setShowTTSProgressBar,
setAsrEnabled,
setAsrServiceType,
setAsrApiKey,
setAsrApiUrl,
setAsrModel,
setAsrAutoStartServer,
setAsrLanguage,
setVoiceCallEnabled,
setVoiceCallModel,
setVoiceCallPrompt,
setIsVoiceCallActive,
setLastPlayedMessageId,
setSkipNextAutoTTS,
setEnableBackspaceDeleteModel
} = settingsSlice.actions
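A minimal dispatch sketch for the new actions, assuming the slice and store are exposed at the usual paths (both import paths are assumptions):

import store from '@renderer/store' // assumed path
import { setTtsEnabled, setTtsFilterOptions, setTtsServiceType } from '@renderer/store/settings' // assumed path

// Enable TTS via Edge and shorten the maximum read-aloud length;
// setTtsFilterOptions merges partial updates into the existing options
store.dispatch(setTtsEnabled(true))
store.dispatch(setTtsServiceType('edge'))
store.dispatch(setTtsFilterOptions({ maxTextLength: 2000 }))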

12
src/renderer/src/types/asr.d.ts vendored Normal file
View File

@ -0,0 +1,12 @@
interface ASRServerAPI {
startServer: () => Promise<{ success: boolean; pid?: number; error?: string }>
stopServer: (pid: number) => Promise<{ success: boolean; error?: string }>
}
interface Window {
api: {
asrServer: ASRServerAPI
// Other APIs...
[key: string]: any
}
}
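A minimal sketch of driving the ASR server from the renderer, using only the ASRServerAPI shape declared above:

async function restartAsrServer(): Promise<void> {
  const started = await window.api.asrServer.startServer()
  if (!started.success || started.pid === undefined) {
    console.error('Failed to start ASR server:', started.error)
    return
  }
  console.log('ASR server started, pid:', started.pid)
  const stopped = await window.api.asrServer.stopServer(started.pid)
  if (!stopped.success) {
    console.error('Failed to stop ASR server:', stopped.error)
  }
}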

View File

@ -4,6 +4,24 @@ interface ObsidianAPI {
getFolders: (vaultName: string) => Promise<Array<{ path: string; type: 'folder' | 'markdown'; name: string }>>
}
interface IpcRendererAPI {
invoke: (channel: string, ...args: any[]) => Promise<any>
on: (channel: string, listener: (...args: any[]) => void) => () => void
once: (channel: string, listener: (...args: any[]) => void) => () => void
removeListener: (channel: string, listener: (...args: any[]) => void) => void
removeAllListeners: (channel: string) => void
send: (channel: string, ...args: any[]) => void
sendSync: (channel: string, ...args: any[]) => any
}
interface ElectronAPI {
ipcRenderer: IpcRendererAPI
process: {
platform: string
}
}
interface Window {
obsidian: ObsidianAPI
electron: ElectronAPI
}
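A small sketch of the typed IPC wrapper declared above; the channel name is illustrative, not one defined in this commit:

// Subscribe to a channel; `on` returns an unsubscribe function per the interface
const unsubscribe = window.electron.ipcRenderer.on('asr-server-log', (...args) => {
  console.log('[asr-server]', ...args)
})
// Later, when the component unmounts:
unsubscribe()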

View File

@ -493,3 +493,12 @@ export interface QuickPhrase {
updatedAt: number
order?: number
}
export type TTSProvider = {
id: string
name: string
apiKey?: string
apiUrl?: string
voice?: string
model?: string
}
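An illustrative TTSProvider value; the field contents are placeholders rather than values shipped in this commit:

const exampleProvider: TTSProvider = {
  id: 'openai',
  name: 'OpenAI TTS',
  apiUrl: 'https://api.openai.com/v1/audio/speech',
  voice: 'alloy', // placeholder
  model: 'tts-1' // placeholder
}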

8
temp.txt Normal file
View File

@ -0,0 +1,8 @@
// No longer automatically clear the callback function, allowing speech recognition results to keep arriving
// setTimeout(() => {
// // Send a reset command to make sure the browser does not keep sending results
// ASRService.cancelRecording()
//
// // Clear the callback function in ASRService so later results are not processed
// ASRService.resultCallback = null
// }, 2000) // Force-cancel after 2 seconds, as a safety measure

437
yarn.lock
View File

@ -3766,7 +3766,7 @@ __metadata:
languageName: node
linkType: hard
"@types/lodash@npm:^4.17.5":
"@types/lodash@npm:^4.17.16":
version: 4.17.16
resolution: "@types/lodash@npm:4.17.16"
checksum: 10c0/cf017901b8ab1d7aabc86d5189d9288f4f99f19a75caf020c0e2c77b8d4cead4db0d0b842d009b029339f92399f49f34377dd7c2721053388f251778b4c23534
@ -4224,7 +4224,7 @@ __metadata:
"@types/diff": "npm:^7"
"@types/fs-extra": "npm:^11"
"@types/js-yaml": "npm:^4"
"@types/lodash": "npm:^4.17.5"
"@types/lodash": "npm:^4.17.16"
"@types/markdown-it": "npm:^14"
"@types/md5": "npm:^2.3.5"
"@types/node": "npm:^18.19.9"
@ -4251,6 +4251,7 @@ __metadata:
diff: "npm:^7.0.0"
docx: "npm:^9.0.2"
dotenv-cli: "npm:^7.4.2"
edge-tts-node: "npm:^1.5.7"
electron: "npm:31.7.6"
electron-builder: "npm:26.0.13"
electron-devtools-installer: "npm:^3.2.0"
@ -4282,6 +4283,7 @@ __metadata:
lucide-react: "npm:^0.487.0"
markdown-it: "npm:^14.1.0"
mime: "npm:^4.0.4"
node-edge-tts: "npm:^1.2.8"
npx-scope-finder: "npm:^1.2.0"
officeparser: "npm:^4.1.1"
openai: "patch:openai@npm%3A4.87.3#~/.yarn/patches/openai-npm-4.87.3-2b30a7685f.patch"
@ -4745,6 +4747,17 @@ __metadata:
languageName: node
linkType: hard
"asn1.js@npm:^4.10.1":
version: 4.10.1
resolution: "asn1.js@npm:4.10.1"
dependencies:
bn.js: "npm:^4.0.0"
inherits: "npm:^2.0.1"
minimalistic-assert: "npm:^1.0.0"
checksum: 10c0/afa7f3ab9e31566c80175a75b182e5dba50589dcc738aa485be42bdd787e2a07246a4b034d481861123cbe646a7656f318f4f1cad2e9e5e808a210d5d6feaa88
languageName: node
linkType: hard
"asn1@npm:~0.2.3":
version: 0.2.6
resolution: "asn1@npm:0.2.6"
@ -4835,7 +4848,7 @@ __metadata:
languageName: node
linkType: hard
"axios@npm:^1.7.3, axios@npm:^1.7.7":
"axios@npm:^1.5.0, axios@npm:^1.7.3, axios@npm:^1.7.7":
version: 1.8.4
resolution: "axios@npm:1.8.4"
dependencies:
@ -4977,6 +4990,20 @@ __metadata:
languageName: node
linkType: hard
"bn.js@npm:^4.0.0, bn.js@npm:^4.1.0, bn.js@npm:^4.11.9":
version: 4.12.1
resolution: "bn.js@npm:4.12.1"
checksum: 10c0/b7f37a0cd5e4b79142b6f4292d518b416be34ae55d6dd6b0f66f96550c8083a50ffbbf8bda8d0ab471158cb81aa74ea4ee58fe33c7802e4a30b13810e98df116
languageName: node
linkType: hard
"bn.js@npm:^5.2.1":
version: 5.2.1
resolution: "bn.js@npm:5.2.1"
checksum: 10c0/bed3d8bd34ec89dbcf9f20f88bd7d4a49c160fda3b561c7bb227501f974d3e435a48fb9b61bc3de304acab9215a3bda0803f7017ffb4d0016a0c3a740a283caa
languageName: node
linkType: hard
"body-parser@npm:^2.0.1":
version: 2.1.0
resolution: "body-parser@npm:2.1.0"
@ -5029,6 +5056,13 @@ __metadata:
languageName: node
linkType: hard
"brorand@npm:^1.0.1, brorand@npm:^1.1.0":
version: 1.1.0
resolution: "brorand@npm:1.1.0"
checksum: 10c0/6f366d7c4990f82c366e3878492ba9a372a73163c09871e80d82fb4ae0d23f9f8924cb8a662330308206e6b3b76ba1d528b4601c9ef73c2166b440b2ea3b7571
languageName: node
linkType: hard
"browser-image-compression@npm:^2.0.2":
version: 2.0.2
resolution: "browser-image-compression@npm:2.0.2"
@ -5038,6 +5072,72 @@ __metadata:
languageName: node
linkType: hard
"browserify-aes@npm:^1.0.4, browserify-aes@npm:^1.2.0":
version: 1.2.0
resolution: "browserify-aes@npm:1.2.0"
dependencies:
buffer-xor: "npm:^1.0.3"
cipher-base: "npm:^1.0.0"
create-hash: "npm:^1.1.0"
evp_bytestokey: "npm:^1.0.3"
inherits: "npm:^2.0.1"
safe-buffer: "npm:^5.0.1"
checksum: 10c0/967f2ae60d610b7b252a4cbb55a7a3331c78293c94b4dd9c264d384ca93354c089b3af9c0dd023534efdc74ffbc82510f7ad4399cf82bc37bc07052eea485f18
languageName: node
linkType: hard
"browserify-cipher@npm:^1.0.1":
version: 1.0.1
resolution: "browserify-cipher@npm:1.0.1"
dependencies:
browserify-aes: "npm:^1.0.4"
browserify-des: "npm:^1.0.0"
evp_bytestokey: "npm:^1.0.0"
checksum: 10c0/aa256dcb42bc53a67168bbc94ab85d243b0a3b56109dee3b51230b7d010d9b78985ffc1fb36e145c6e4db151f888076c1cfc207baf1525d3e375cbe8187fe27d
languageName: node
linkType: hard
"browserify-des@npm:^1.0.0":
version: 1.0.2
resolution: "browserify-des@npm:1.0.2"
dependencies:
cipher-base: "npm:^1.0.1"
des.js: "npm:^1.0.0"
inherits: "npm:^2.0.1"
safe-buffer: "npm:^5.1.2"
checksum: 10c0/943eb5d4045eff80a6cde5be4e5fbb1f2d5002126b5a4789c3c1aae3cdddb1eb92b00fb92277f512288e5c6af330730b1dbabcf7ce0923e749e151fcee5a074d
languageName: node
linkType: hard
"browserify-rsa@npm:^4.0.0, browserify-rsa@npm:^4.1.0":
version: 4.1.1
resolution: "browserify-rsa@npm:4.1.1"
dependencies:
bn.js: "npm:^5.2.1"
randombytes: "npm:^2.1.0"
safe-buffer: "npm:^5.2.1"
checksum: 10c0/b650ee1192e3d7f3d779edc06dd96ed8720362e72ac310c367b9d7fe35f7e8dbb983c1829142b2b3215458be8bf17c38adc7224920843024ed8cf39e19c513c0
languageName: node
linkType: hard
"browserify-sign@npm:^4.2.3":
version: 4.2.3
resolution: "browserify-sign@npm:4.2.3"
dependencies:
bn.js: "npm:^5.2.1"
browserify-rsa: "npm:^4.1.0"
create-hash: "npm:^1.2.0"
create-hmac: "npm:^1.1.7"
elliptic: "npm:^6.5.5"
hash-base: "npm:~3.0"
inherits: "npm:^2.0.4"
parse-asn1: "npm:^5.1.7"
readable-stream: "npm:^2.3.8"
safe-buffer: "npm:^5.2.1"
checksum: 10c0/30c0eba3f5970a20866a4d3fbba2c5bd1928cd24f47faf995f913f1499214c6f3be14bb4d6ec1ab5c6cafb1eca9cb76ba1c2e1c04ed018370634d4e659c77216
languageName: node
linkType: hard
"browserslist@npm:^4.21.1, browserslist@npm:^4.24.0":
version: 4.24.4
resolution: "browserslist@npm:4.24.4"
@ -5104,6 +5204,13 @@ __metadata:
languageName: node
linkType: hard
"buffer-xor@npm:^1.0.3":
version: 1.0.3
resolution: "buffer-xor@npm:1.0.3"
checksum: 10c0/fd269d0e0bf71ecac3146187cfc79edc9dbb054e2ee69b4d97dfb857c6d997c33de391696d04bdd669272751fa48e7872a22f3a6c7b07d6c0bc31dbe02a4075c
languageName: node
linkType: hard
"buffer@npm:^5.1.0, buffer@npm:^5.2.0, buffer@npm:^5.2.1, buffer@npm:^5.5.0":
version: 5.7.1
resolution: "buffer@npm:5.7.1"
@ -5502,6 +5609,16 @@ __metadata:
languageName: node
linkType: hard
"cipher-base@npm:^1.0.0, cipher-base@npm:^1.0.1, cipher-base@npm:^1.0.3":
version: 1.0.6
resolution: "cipher-base@npm:1.0.6"
dependencies:
inherits: "npm:^2.0.4"
safe-buffer: "npm:^5.2.1"
checksum: 10c0/f73268e0ee6585800875d9748f2a2377ae7c2c3375cba346f75598ac6f6bc3a25dec56e984a168ced1a862529ffffe615363f750c40349039d96bd30fba0fca8
languageName: node
linkType: hard
"classcat@npm:^5.0.3":
version: 5.0.5
resolution: "classcat@npm:5.0.5"
@ -5924,6 +6041,43 @@ __metadata:
languageName: node
linkType: hard
"create-ecdh@npm:^4.0.4":
version: 4.0.4
resolution: "create-ecdh@npm:4.0.4"
dependencies:
bn.js: "npm:^4.1.0"
elliptic: "npm:^6.5.3"
checksum: 10c0/77b11a51360fec9c3bce7a76288fc0deba4b9c838d5fb354b3e40c59194d23d66efe6355fd4b81df7580da0661e1334a235a2a5c040b7569ba97db428d466e7f
languageName: node
linkType: hard
"create-hash@npm:^1.1.0, create-hash@npm:^1.1.2, create-hash@npm:^1.2.0":
version: 1.2.0
resolution: "create-hash@npm:1.2.0"
dependencies:
cipher-base: "npm:^1.0.1"
inherits: "npm:^2.0.1"
md5.js: "npm:^1.3.4"
ripemd160: "npm:^2.0.1"
sha.js: "npm:^2.4.0"
checksum: 10c0/d402e60e65e70e5083cb57af96d89567954d0669e90550d7cec58b56d49c4b193d35c43cec8338bc72358198b8cbf2f0cac14775b651e99238e1cf411490f915
languageName: node
linkType: hard
"create-hmac@npm:^1.1.4, create-hmac@npm:^1.1.7":
version: 1.1.7
resolution: "create-hmac@npm:1.1.7"
dependencies:
cipher-base: "npm:^1.0.3"
create-hash: "npm:^1.1.0"
inherits: "npm:^2.0.1"
ripemd160: "npm:^2.0.0"
safe-buffer: "npm:^5.0.1"
sha.js: "npm:^2.4.8"
checksum: 10c0/24332bab51011652a9a0a6d160eed1e8caa091b802335324ae056b0dcb5acbc9fcf173cf10d128eba8548c3ce98dfa4eadaa01bd02f44a34414baee26b651835
languageName: node
linkType: hard
"cross-spawn@npm:^7.0.1, cross-spawn@npm:^7.0.3, cross-spawn@npm:^7.0.6":
version: 7.0.6
resolution: "cross-spawn@npm:7.0.6"
@ -5942,6 +6096,26 @@ __metadata:
languageName: node
linkType: hard
"crypto-browserify@npm:^3.12.0":
version: 3.12.1
resolution: "crypto-browserify@npm:3.12.1"
dependencies:
browserify-cipher: "npm:^1.0.1"
browserify-sign: "npm:^4.2.3"
create-ecdh: "npm:^4.0.4"
create-hash: "npm:^1.2.0"
create-hmac: "npm:^1.1.7"
diffie-hellman: "npm:^5.0.3"
hash-base: "npm:~3.0.4"
inherits: "npm:^2.0.4"
pbkdf2: "npm:^3.1.2"
public-encrypt: "npm:^4.0.3"
randombytes: "npm:^2.1.0"
randomfill: "npm:^1.0.4"
checksum: 10c0/184a2def7b16628e79841243232ab5497f18d8e158ac21b7ce90ab172427d0a892a561280adc08f9d4d517bce8db2a5b335dc21abb970f787f8e874bd7b9db7d
languageName: node
linkType: hard
"css-box-model@npm:^1.2.1":
version: 1.2.1
resolution: "css-box-model@npm:1.2.1"
@ -6655,6 +6829,16 @@ __metadata:
languageName: node
linkType: hard
"des.js@npm:^1.0.0":
version: 1.1.0
resolution: "des.js@npm:1.1.0"
dependencies:
inherits: "npm:^2.0.1"
minimalistic-assert: "npm:^1.0.0"
checksum: 10c0/671354943ad67493e49eb4c555480ab153edd7cee3a51c658082fcde539d2690ed2a4a0b5d1f401f9cde822edf3939a6afb2585f32c091f2d3a1b1665cd45236
languageName: node
linkType: hard
"destroy@npm:^1.2.0":
version: 1.2.0
resolution: "destroy@npm:1.2.0"
@ -6726,6 +6910,17 @@ __metadata:
languageName: node
linkType: hard
"diffie-hellman@npm:^5.0.3":
version: 5.0.3
resolution: "diffie-hellman@npm:5.0.3"
dependencies:
bn.js: "npm:^4.1.0"
miller-rabin: "npm:^4.0.0"
randombytes: "npm:^2.0.0"
checksum: 10c0/ce53ccafa9ca544b7fc29b08a626e23a9b6562efc2a98559a0c97b4718937cebaa9b5d7d0a05032cc9c1435e9b3c1532b9e9bf2e0ede868525922807ad6e1ecf
languageName: node
linkType: hard
"dingbat-to-unicode@npm:^1.0.1":
version: 1.0.1
resolution: "dingbat-to-unicode@npm:1.0.1"
@ -6962,6 +7157,22 @@ __metadata:
languageName: node
linkType: hard
"edge-tts-node@npm:^1.5.7":
version: 1.5.7
resolution: "edge-tts-node@npm:1.5.7"
dependencies:
axios: "npm:^1.5.0"
buffer: "npm:^6.0.3"
crypto-browserify: "npm:^3.12.0"
isomorphic-ws: "npm:^5.0.0"
process: "npm:^0.11.10"
randombytes: "npm:^2.1.0"
stream-browserify: "npm:^3.0.0"
ws: "npm:^8.14.1"
checksum: 10c0/83b5df1d5312163006643fb6e6a9ca37ca6bc4c871b66f0c800f3e0bb1b2473fa0d67c125b256e2d93b493b8fbf59aaab06f60d4fa15fe3b7e2d3fb796016b1f
languageName: node
linkType: hard
"ee-first@npm:1.1.1":
version: 1.1.1
resolution: "ee-first@npm:1.1.1"
@ -7127,6 +7338,21 @@ __metadata:
languageName: node
linkType: hard
"elliptic@npm:^6.5.3, elliptic@npm:^6.5.5":
version: 6.6.1
resolution: "elliptic@npm:6.6.1"
dependencies:
bn.js: "npm:^4.11.9"
brorand: "npm:^1.1.0"
hash.js: "npm:^1.0.0"
hmac-drbg: "npm:^1.0.1"
inherits: "npm:^2.0.4"
minimalistic-assert: "npm:^1.0.1"
minimalistic-crypto-utils: "npm:^1.0.1"
checksum: 10c0/8b24ef782eec8b472053793ea1e91ae6bee41afffdfcb78a81c0a53b191e715cbe1292aa07165958a9bbe675bd0955142560b1a007ffce7d6c765bcaf951a867
languageName: node
linkType: hard
"emittery@npm:^1.0.3":
version: 1.1.0
resolution: "emittery@npm:1.1.0"
@ -7883,6 +8109,17 @@ __metadata:
languageName: node
linkType: hard
"evp_bytestokey@npm:^1.0.0, evp_bytestokey@npm:^1.0.3":
version: 1.0.3
resolution: "evp_bytestokey@npm:1.0.3"
dependencies:
md5.js: "npm:^1.3.4"
node-gyp: "npm:latest"
safe-buffer: "npm:^5.1.1"
checksum: 10c0/77fbe2d94a902a80e9b8f5a73dcd695d9c14899c5e82967a61b1fc6cbbb28c46552d9b127cff47c45fcf684748bdbcfa0a50410349109de87ceb4b199ef6ee99
languageName: node
linkType: hard
"execa@npm:^8.0.1":
version: 8.0.1
resolution: "execa@npm:8.0.1"
@ -9059,7 +9296,28 @@ __metadata:
languageName: node
linkType: hard
"hash.js@npm:^1.1.7":
"hash-base@npm:^3.0.0":
version: 3.1.0
resolution: "hash-base@npm:3.1.0"
dependencies:
inherits: "npm:^2.0.4"
readable-stream: "npm:^3.6.0"
safe-buffer: "npm:^5.2.0"
checksum: 10c0/663eabcf4173326fbb65a1918a509045590a26cc7e0964b754eef248d281305c6ec9f6b31cb508d02ffca383ab50028180ce5aefe013e942b44a903ac8dc80d0
languageName: node
linkType: hard
"hash-base@npm:~3.0, hash-base@npm:~3.0.4":
version: 3.0.5
resolution: "hash-base@npm:3.0.5"
dependencies:
inherits: "npm:^2.0.4"
safe-buffer: "npm:^5.2.1"
checksum: 10c0/6dc185b79bad9b6d525cd132a588e4215380fdc36fec6f7a8a58c5db8e3b642557d02ad9c367f5e476c7c3ad3ccffa3607f308b124e1ed80e3b80a1b254db61e
languageName: node
linkType: hard
"hash.js@npm:^1.0.0, hash.js@npm:^1.0.3, hash.js@npm:^1.1.7":
version: 1.1.7
resolution: "hash.js@npm:1.1.7"
dependencies:
@ -9283,6 +9541,17 @@ __metadata:
languageName: node
linkType: hard
"hmac-drbg@npm:^1.0.1":
version: 1.0.1
resolution: "hmac-drbg@npm:1.0.1"
dependencies:
hash.js: "npm:^1.0.3"
minimalistic-assert: "npm:^1.0.0"
minimalistic-crypto-utils: "npm:^1.0.1"
checksum: 10c0/f3d9ba31b40257a573f162176ac5930109816036c59a09f901eb2ffd7e5e705c6832bedfff507957125f2086a0ab8f853c0df225642a88bf1fcaea945f20600d
languageName: node
linkType: hard
"hoist-non-react-statics@npm:^3.3.0, hoist-non-react-statics@npm:^3.3.2":
version: 3.3.2
resolution: "hoist-non-react-statics@npm:3.3.2"
@ -9652,7 +9921,7 @@ __metadata:
languageName: node
linkType: hard
"inherits@npm:2, inherits@npm:2.0.4, inherits@npm:^2.0.3, inherits@npm:^2.0.4, inherits@npm:~2.0.3":
"inherits@npm:2, inherits@npm:2.0.4, inherits@npm:^2.0.1, inherits@npm:^2.0.3, inherits@npm:^2.0.4, inherits@npm:~2.0.3, inherits@npm:~2.0.4":
version: 2.0.4
resolution: "inherits@npm:2.0.4"
checksum: 10c0/4e531f648b29039fb7426fb94075e6545faa1eb9fe83c29f0b6d9e7263aceb4289d2d4557db0d428188eeb449cc7c5e77b0a0b2c4e248ff2a65933a0dee49ef2
@ -10041,6 +10310,15 @@ __metadata:
languageName: node
linkType: hard
"isomorphic-ws@npm:^5.0.0":
version: 5.0.0
resolution: "isomorphic-ws@npm:5.0.0"
peerDependencies:
ws: "*"
checksum: 10c0/a058ac8b5e6efe9e46252cb0bc67fd325005d7216451d1a51238bc62d7da8486f828ef017df54ddf742e0fffcbe4b1bcc2a66cc115b027ed0180334cd18df252
languageName: node
linkType: hard
"isstream@npm:~0.1.2":
version: 0.1.2
resolution: "isstream@npm:0.1.2"
@ -10991,6 +11269,17 @@ __metadata:
languageName: node
linkType: hard
"md5.js@npm:^1.3.4":
version: 1.3.5
resolution: "md5.js@npm:1.3.5"
dependencies:
hash-base: "npm:^3.0.0"
inherits: "npm:^2.0.1"
safe-buffer: "npm:^5.1.2"
checksum: 10c0/b7bd75077f419c8e013fc4d4dada48be71882e37d69a44af65a2f2804b91e253441eb43a0614423a1c91bb830b8140b0dc906bc797245e2e275759584f4efcc5
languageName: node
linkType: hard
"md5@npm:^2.3.0":
version: 2.3.0
resolution: "md5@npm:2.3.0"
@ -11907,6 +12196,18 @@ __metadata:
languageName: node
linkType: hard
"miller-rabin@npm:^4.0.0":
version: 4.0.1
resolution: "miller-rabin@npm:4.0.1"
dependencies:
bn.js: "npm:^4.0.0"
brorand: "npm:^1.0.1"
bin:
miller-rabin: bin/miller-rabin
checksum: 10c0/26b2b96f6e49dbcff7faebb78708ed2f5f9ae27ac8cbbf1d7c08f83cf39bed3d418c0c11034dce997da70d135cc0ff6f3a4c15dc452f8e114c11986388a64346
languageName: node
linkType: hard
"mime-db@npm:1.52.0":
version: 1.52.0
resolution: "mime-db@npm:1.52.0"
@ -12024,13 +12325,20 @@ __metadata:
languageName: node
linkType: hard
"minimalistic-assert@npm:^1.0.1":
"minimalistic-assert@npm:^1.0.0, minimalistic-assert@npm:^1.0.1":
version: 1.0.1
resolution: "minimalistic-assert@npm:1.0.1"
checksum: 10c0/96730e5601cd31457f81a296f521eb56036e6f69133c0b18c13fe941109d53ad23a4204d946a0d638d7f3099482a0cec8c9bb6d642604612ce43ee536be3dddd
languageName: node
linkType: hard
"minimalistic-crypto-utils@npm:^1.0.1":
version: 1.0.1
resolution: "minimalistic-crypto-utils@npm:1.0.1"
checksum: 10c0/790ecec8c5c73973a4fbf2c663d911033e8494d5fb0960a4500634766ab05d6107d20af896ca2132e7031741f19888154d44b2408ada0852446705441383e9f8
languageName: node
linkType: hard
"minimatch@npm:^10.0.0":
version: 10.0.1
resolution: "minimatch@npm:10.0.1"
@ -12392,6 +12700,19 @@ __metadata:
languageName: node
linkType: hard
"node-edge-tts@npm:^1.2.8":
version: 1.2.8
resolution: "node-edge-tts@npm:1.2.8"
dependencies:
https-proxy-agent: "npm:^7.0.1"
ws: "npm:^8.13.0"
yargs: "npm:^17.7.2"
bin:
node-edge-tts: bin.js
checksum: 10c0/6d70ab660a0a82cf7b87dfa61c0680a9bce3b38a9b58ca1075d4a0a8f7ccbdb17355c995e6ca92cf5ec0260c967bbf9961e7762f6db182087aa3b3e26d7b2077
languageName: node
linkType: hard
"node-ensure@npm:^0.0.0":
version: 0.0.0
resolution: "node-ensure@npm:0.0.0"
@ -13114,6 +13435,20 @@ __metadata:
languageName: node
linkType: hard
"parse-asn1@npm:^5.0.0, parse-asn1@npm:^5.1.7":
version: 5.1.7
resolution: "parse-asn1@npm:5.1.7"
dependencies:
asn1.js: "npm:^4.10.1"
browserify-aes: "npm:^1.2.0"
evp_bytestokey: "npm:^1.0.3"
hash-base: "npm:~3.0"
pbkdf2: "npm:^3.1.2"
safe-buffer: "npm:^5.2.1"
checksum: 10c0/05eb5937405c904eb5a7f3633bab1acc11f4ae3478a07ef5c6d81ce88c3c0e505ff51f9c7b935ebc1265c868343793698fc91025755a895d0276f620f95e8a82
languageName: node
linkType: hard
"parse-bmfont-ascii@npm:^1.0.3":
version: 1.0.6
resolution: "parse-bmfont-ascii@npm:1.0.6"
@ -13302,6 +13637,19 @@ __metadata:
languageName: node
linkType: hard
"pbkdf2@npm:^3.1.2":
version: 3.1.2
resolution: "pbkdf2@npm:3.1.2"
dependencies:
create-hash: "npm:^1.1.2"
create-hmac: "npm:^1.1.4"
ripemd160: "npm:^2.0.1"
safe-buffer: "npm:^5.0.1"
sha.js: "npm:^2.4.8"
checksum: 10c0/5a30374e87d33fa080a92734d778cf172542cc7e41b96198c4c88763997b62d7850de3fbda5c3111ddf79805ee7c1da7046881c90ac4920b5e324204518b05fd
languageName: node
linkType: hard
"pdf-parse@npm:1.1.1":
version: 1.1.1
resolution: "pdf-parse@npm:1.1.1"
@ -13709,6 +14057,20 @@ __metadata:
languageName: node
linkType: hard
"public-encrypt@npm:^4.0.3":
version: 4.0.3
resolution: "public-encrypt@npm:4.0.3"
dependencies:
bn.js: "npm:^4.1.0"
browserify-rsa: "npm:^4.0.0"
create-hash: "npm:^1.1.0"
parse-asn1: "npm:^5.0.0"
randombytes: "npm:^2.0.1"
safe-buffer: "npm:^5.1.2"
checksum: 10c0/6c2cc19fbb554449e47f2175065d6b32f828f9b3badbee4c76585ac28ae8641aafb9bb107afc430c33c5edd6b05dbe318df4f7d6d7712b1093407b11c4280700
languageName: node
linkType: hard
"pump@npm:^3.0.0":
version: 3.0.2
resolution: "pump@npm:3.0.2"
@ -13793,6 +14155,25 @@ __metadata:
languageName: node
linkType: hard
"randombytes@npm:^2.0.0, randombytes@npm:^2.0.1, randombytes@npm:^2.0.5, randombytes@npm:^2.1.0":
version: 2.1.0
resolution: "randombytes@npm:2.1.0"
dependencies:
safe-buffer: "npm:^5.1.0"
checksum: 10c0/50395efda7a8c94f5dffab564f9ff89736064d32addf0cc7e8bf5e4166f09f8ded7a0849ca6c2d2a59478f7d90f78f20d8048bca3cdf8be09d8e8a10790388f3
languageName: node
linkType: hard
"randomfill@npm:^1.0.4":
version: 1.0.4
resolution: "randomfill@npm:1.0.4"
dependencies:
randombytes: "npm:^2.0.5"
safe-buffer: "npm:^5.1.0"
checksum: 10c0/11aeed35515872e8f8a2edec306734e6b74c39c46653607f03c68385ab8030e2adcc4215f76b5e4598e028c4750d820afd5c65202527d831d2a5f207fe2bc87c
languageName: node
linkType: hard
"range-parser@npm:^1.2.1, range-parser@npm:~1.2.1":
version: 1.2.1
resolution: "range-parser@npm:1.2.1"
@ -14577,7 +14958,7 @@ __metadata:
languageName: node
linkType: hard
"readable-stream@npm:3, readable-stream@npm:^3.1.1, readable-stream@npm:^3.4.0, readable-stream@npm:^3.6.0":
"readable-stream@npm:3, readable-stream@npm:^3.1.1, readable-stream@npm:^3.4.0, readable-stream@npm:^3.5.0, readable-stream@npm:^3.6.0":
version: 3.6.2
resolution: "readable-stream@npm:3.6.2"
dependencies:
@ -14588,7 +14969,7 @@ __metadata:
languageName: node
linkType: hard
"readable-stream@npm:^2.0.6, readable-stream@npm:^2.2.2, readable-stream@npm:^2.3.0, readable-stream@npm:^2.3.5, readable-stream@npm:~2.3.6":
"readable-stream@npm:^2.0.6, readable-stream@npm:^2.2.2, readable-stream@npm:^2.3.0, readable-stream@npm:^2.3.5, readable-stream@npm:^2.3.8, readable-stream@npm:~2.3.6":
version: 2.3.8
resolution: "readable-stream@npm:2.3.8"
dependencies:
@ -15087,6 +15468,16 @@ __metadata:
languageName: node
linkType: hard
"ripemd160@npm:^2.0.0, ripemd160@npm:^2.0.1":
version: 2.0.2
resolution: "ripemd160@npm:2.0.2"
dependencies:
hash-base: "npm:^3.0.0"
inherits: "npm:^2.0.1"
checksum: 10c0/f6f0df78817e78287c766687aed4d5accbebc308a8e7e673fb085b9977473c1f139f0c5335d353f172a915bb288098430755d2ad3c4f30612f4dd0c901cd2c3a
languageName: node
linkType: hard
"roarr@npm:^2.15.3":
version: 2.15.4
resolution: "roarr@npm:2.15.4"
@ -15229,6 +15620,7 @@ __metadata:
languageName: node
linkType: hard
"rw@npm:1":
version: 1.3.3
resolution: "rw@npm:1.3.3"
@ -15237,6 +15629,9 @@ __metadata:
linkType: hard
"safe-buffer@npm:5.2.1, safe-buffer@npm:^5.0.1, safe-buffer@npm:^5.1.1, safe-buffer@npm:^5.1.2, safe-buffer@npm:^5.2.1, safe-buffer@npm:~5.2.0":
=======
"safe-buffer@npm:5.2.1, safe-buffer@npm:^5.0.1, safe-buffer@npm:^5.1.0, safe-buffer@npm:^5.1.1, safe-buffer@npm:^5.1.2, safe-buffer@npm:^5.2.0, safe-buffer@npm:^5.2.1, safe-buffer@npm:~5.2.0":
version: 5.2.1
resolution: "safe-buffer@npm:5.2.1"
checksum: 10c0/6501914237c0a86e9675d4e51d89ca3c21ffd6a31642efeba25ad65720bce6921c9e7e974e5be91a786b25aa058b5303285d3c15dbabf983a919f5f630d349f3
@ -15432,6 +15827,18 @@ __metadata:
languageName: node
linkType: hard
"sha.js@npm:^2.4.0, sha.js@npm:^2.4.8":
version: 2.4.11
resolution: "sha.js@npm:2.4.11"
dependencies:
inherits: "npm:^2.0.1"
safe-buffer: "npm:^5.0.1"
bin:
sha.js: ./bin.js
checksum: 10c0/b7a371bca8821c9cc98a0aeff67444a03d48d745cb103f17228b96793f455f0eb0a691941b89ea1e60f6359207e36081d9be193252b0f128e0daf9cfea2815a5
languageName: node
linkType: hard
"shallowequal@npm:1.1.0":
version: 1.1.0
resolution: "shallowequal@npm:1.1.0"
@ -15820,6 +16227,16 @@ __metadata:
languageName: node
linkType: hard
"stream-browserify@npm:^3.0.0":
version: 3.0.0
resolution: "stream-browserify@npm:3.0.0"
dependencies:
inherits: "npm:~2.0.4"
readable-stream: "npm:^3.5.0"
checksum: 10c0/ec3b975a4e0aa4b3dc5e70ffae3fc8fd29ac725353a14e72f213dff477b00330140ad014b163a8cbb9922dfe90803f81a5ea2b269e1bbfd8bd71511b88f889ad
languageName: node
linkType: hard
"stream-head@npm:^3.0.0":
version: 3.0.0
resolution: "stream-head@npm:3.0.0"
@ -17393,7 +17810,7 @@ __metadata:
languageName: node
linkType: hard
"ws@npm:^8.13.0, ws@npm:^8.18.0":
"ws@npm:^8.13.0, ws@npm:^8.14.1, ws@npm:^8.18.0":
version: 8.18.1
resolution: "ws@npm:8.18.1"
peerDependencies:
@ -17600,7 +18017,7 @@ __metadata:
languageName: node
linkType: hard
"yargs@npm:^17.0.1, yargs@npm:^17.5.1, yargs@npm:^17.6.2":
"yargs@npm:^17.0.1, yargs@npm:^17.5.1, yargs@npm:^17.6.2, yargs@npm:^17.7.2":
version: 17.7.2
resolution: "yargs@npm:17.7.2"
dependencies: