Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-24 18:50:56 +08:00)
Added TTS-related services and updated settings
This commit is contained in:
parent d5cededd8b
commit a7a16272d3
@@ -11,6 +11,7 @@ import { Button, Space, Tooltip } from 'antd'
 import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import { useDispatch } from 'react-redux'
+import { Action } from 'redux'
 import styled from 'styled-components'
 
 import { VoiceCallService } from '../services/VoiceCallService'
@@ -59,6 +60,16 @@ const DraggableVoiceCallWindow: React.FC<Props> = ({
   const [isPaused, setIsPaused] = useState(false)
   const [isMuted, setIsMuted] = useState(false)
 
+  // eslint-disable-next-line react-hooks/exhaustive-deps
+  const memoizedOnClose = useCallback(() => {
+    onClose();
+  }, []);
+
+  // eslint-disable-next-line react-hooks/exhaustive-deps
+  const memoizedDispatch = useCallback((action: Action) => {
+    dispatch(action);
+  }, []);
+
   useEffect(() => {
     const startVoiceCall = async () => {
       try {
@@ -88,7 +99,7 @@ const DraggableVoiceCallWindow: React.FC<Props> = ({
       } catch (error) {
         console.error('Voice call error:', error)
         window.message.error({ content: t('voice_call.error'), key: 'voice-call-init' })
-        onClose()
+        memoizedOnClose()
       }
     }
 
@@ -101,11 +112,11 @@ const DraggableVoiceCallWindow: React.FC<Props> = ({
 
     if (visible) {
       // Update the voice-call window state
-      dispatch(setIsVoiceCallActive(true))
+      memoizedDispatch(setIsVoiceCallActive(true))
      // Reset the last played message ID so existing messages are not auto-played
-      dispatch(setLastPlayedMessageId(null))
+      memoizedDispatch(setLastPlayedMessageId(null))
      // Skip the next auto TTS so the last message is not auto-played when the window opens
-      dispatch(setSkipNextAutoTTS(true))
+      memoizedDispatch(setSkipNextAutoTTS(true))
       startVoiceCall()
       // Add event listener
       window.addEventListener('tts-state-change', handleTTSStateChange as EventListener)
@@ -113,12 +124,13 @@ const DraggableVoiceCallWindow: React.FC<Props> = ({
 
     return () => {
       // Update the voice-call window state
-      dispatch(setIsVoiceCallActive(false))
+      memoizedDispatch(setIsVoiceCallActive(false))
       VoiceCallService.endCall()
       // Remove event listener
       window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener)
     }
-  }, [visible, t, dispatch, onClose])
+    // Use memoizedOnClose and memoizedDispatch instead of the original onClose and dispatch
+  }, [visible, t, memoizedDispatch, memoizedOnClose])
 
   // Drag handling
   const handleDragStart = (e: React.MouseEvent) => {
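The DraggableVoiceCallWindow change above wraps the `onClose` prop and the Redux `dispatch` in `useCallback` with empty dependency lists, then lists those wrappers in the effect's dependency array so the start/teardown effect no longer re-runs whenever the parent passes a new `onClose` reference. A minimal sketch of the same pattern, with the hook name and effect body as placeholders rather than the component's real code:

import { useCallback, useEffect } from 'react'
import { useDispatch } from 'react-redux'
import type { Action } from 'redux'

// Sketch only: `useStableCallEffect` and the effect body are placeholders.
function useStableCallEffect(onClose: () => void) {
  const dispatch = useDispatch()

  // Empty deps keep the wrapper's identity stable for the component's lifetime,
  // which is why the exhaustive-deps rule is silenced, as in the diff above.
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const memoizedOnClose = useCallback(() => {
    onClose()
  }, [])

  // eslint-disable-next-line react-hooks/exhaustive-deps
  const memoizedDispatch = useCallback((action: Action) => {
    dispatch(action)
  }, [])

  useEffect(() => {
    // start the voice call here, using memoizedDispatch for state updates
    return () => {
      // end the call here; call memoizedOnClose() on failure paths
    }
  }, [memoizedDispatch, memoizedOnClose]) // stable references: effect runs once

  return { memoizedOnClose, memoizedDispatch }
}

export default useStableCallEffect

Because the wrappers capture the `onClose` and `dispatch` seen on the first render, this trades dependency correctness for a stable effect; the commit accepts that trade-off explicitly via the eslint-disable comments.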
@@ -29,11 +29,6 @@ const VoiceCallModal: React.FC<Props> = ({ visible, onClose }) => {
   const [isRecording, setIsRecording] = useState(false);
   const [isProcessing, setIsProcessing] = useState(false);
 
-  const handleClose = () => {
-    VoiceCallService.endCall();
-    onClose();
-  };
-
   useEffect(() => {
     const startVoiceCall = async () => {
       try {
@@ -83,7 +78,12 @@ const VoiceCallModal: React.FC<Props> = ({ visible, onClose }) => {
       // Remove event listener
       window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener);
     };
-  }, [visible, t, handleClose]);
+  }, [visible, t]);
 
+  const handleClose = () => {
+    VoiceCallService.endCall();
+    onClose();
+  };
+
   const toggleMute = () => {
     const newMuteState = !isMuted;
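In VoiceCallModal the `handleClose` helper moves below the effect and is dropped from the effect's dependency array, so the effect now depends only on `visible` and `t`. A condensed, self-contained sketch of the resulting shape (the call start/stop logic is stubbed out; `VoiceCallService`, `t`, and the real listeners are omitted):

import React, { useEffect } from 'react'

interface Props {
  visible: boolean
  onClose: () => void
}

// Sketch of the reordered VoiceCallModal: the effect no longer lists the
// close handler as a dependency, so it re-runs only when `visible` changes.
const VoiceCallModalSketch: React.FC<Props> = ({ visible, onClose }) => {
  useEffect(() => {
    if (!visible) return
    // start the call and register event listeners here
    return () => {
      // end the call and remove event listeners here
    }
  }, [visible]) // handleClose intentionally omitted, as in the diff

  // Recreated on every render, but nothing re-runs because of it now.
  const handleClose = () => {
    onClose()
  }

  return visible ? <button onClick={handleClose}>Close</button> : null
}

export default VoiceCallModalSketch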
@@ -1,6 +1,7 @@
-import React, { useEffect, useState } from 'react';
+import React, { useCallback, useEffect, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { useDispatch } from 'react-redux';
+import { Action } from 'redux';
 import styled from 'styled-components';
 import { Button, Space, Tooltip } from 'antd';
 import {
@@ -23,6 +24,17 @@ const VoiceCallWindow: React.FC = () => {
   const { t } = useTranslation();
   const dispatch = useDispatch();
 
+  // Wrap dispatch and handleClose with useCallback to avoid an infinite loop
+  // eslint-disable-next-line react-hooks/exhaustive-deps
+  const memoizedDispatch = useCallback((action: Action) => {
+    dispatch(action);
+  }, []);
+
+  // eslint-disable-next-line react-hooks/exhaustive-deps
+  const memoizedHandleClose = useCallback(() => {
+    window.close();
+  }, []);
+
   // Voice call state
   const [transcript, setTranscript] = useState('');
   const [isListening, setIsListening] = useState(false);
@@ -61,7 +73,7 @@ const VoiceCallWindow: React.FC = () => {
     } catch (error) {
       console.error('Voice call error:', error);
       window.message.error({ content: t('voice_call.error'), key: 'voice-call-init' });
-      handleClose();
+      memoizedHandleClose();
     }
   };
 
@@ -73,11 +85,11 @@ const VoiceCallWindow: React.FC = () => {
     };
 
     // Update the voice-call window state
-    dispatch(setIsVoiceCallActive(true));
+    memoizedDispatch(setIsVoiceCallActive(true));
     // Reset the last played message ID so existing messages are not auto-played
-    dispatch(setLastPlayedMessageId(null));
+    memoizedDispatch(setLastPlayedMessageId(null));
     // Skip the next auto TTS so the last message is not auto-played when the window opens
-    dispatch(setSkipNextAutoTTS(true));
+    memoizedDispatch(setSkipNextAutoTTS(true));
 
     startVoiceCall();
     // Add event listener
@@ -85,12 +97,12 @@ const VoiceCallWindow: React.FC = () => {
 
     return () => {
      // Update the voice-call window state
-      dispatch(setIsVoiceCallActive(false));
+      memoizedDispatch(setIsVoiceCallActive(false));
       VoiceCallService.endCall();
       // Remove event listener
       window.removeEventListener('tts-state-change', handleTTSStateChange as EventListener);
     };
-  }, [t, dispatch]);
+  }, [t, memoizedDispatch, memoizedHandleClose]);
 
   // Voice call handling
   const toggleMute = () => {
@@ -106,7 +118,7 @@ const VoiceCallWindow: React.FC = () => {
 
   // Close the window
   const handleClose = () => {
-    window.close();
+    memoizedHandleClose();
   };
 
   // Press-and-hold-to-talk handling
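One caveat worth stating (general React behavior, not something this commit changes): a `useCallback` with an empty dependency list keeps the values it closed over on the first render, which is exactly why the `react-hooks/exhaustive-deps` rule has to be silenced in the hunks above. A tiny illustration with a hypothetical `label` prop:

import React, { useCallback } from 'react'

// Illustration only: with [] deps the callback keeps the first `label` it saw,
// even after the parent re-renders this component with a new value.
const StaleLabelExample: React.FC<{ label: string }> = ({ label }) => {
  // eslint-disable-next-line react-hooks/exhaustive-deps
  const logLabel = useCallback(() => {
    console.log(label) // always logs the label from the first render
  }, [])

  return <button onClick={logLabel}>{label}</button>
}

export default StaleLabelExample

For `dispatch` this is harmless (react-redux keeps the dispatch reference stable for a given store), and `window.close` has nothing to go stale; the `onClose` wrapper is the one place where a later prop change would be ignored.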
@@ -64,7 +64,10 @@ class ASRService {
     // Create a new connection
     try {
       console.log('[ASRService] 正在连接WebSocket服务器...')
-      window.message.loading({ content: '正在连接语音识别服务...', key: 'ws-connect' })
+      // Use setTimeout to avoid calling the message API during render
+      setTimeout(() => {
+        window.message.loading({ content: '正在连接语音识别服务...', key: 'ws-connect' })
+      }, 0)
 
       this.ws = new WebSocket('ws://localhost:8080')
       this.wsConnected = false
@@ -72,7 +75,10 @@ class ASRService {
 
       this.ws.onopen = () => {
         console.log('[ASRService] WebSocket连接成功')
-        window.message.success({ content: '语音识别服务连接成功', key: 'ws-connect' })
+        // Use setTimeout to avoid calling the message API during render
+        setTimeout(() => {
+          window.message.success({ content: '语音识别服务连接成功', key: 'ws-connect' })
+        }, 0)
         this.wsConnected = true
         this.reconnectAttempt = 0
         this.ws?.send(JSON.stringify({ type: 'identify', role: 'electron' }))
@@ -89,7 +95,10 @@ class ASRService {
       this.ws.onerror = (error) => {
         console.error('[ASRService] WebSocket连接错误:', error)
         this.wsConnected = false
-        window.message.error({ content: '语音识别服务连接失败', key: 'ws-connect' })
+        // Use setTimeout to avoid calling the message API during render
+        setTimeout(() => {
+          window.message.error({ content: '语音识别服务连接失败', key: 'ws-connect' })
+        }, 0)
         resolve(false)
       }
 
@@ -114,18 +123,27 @@ class ASRService {
           if (data.message === 'browser_ready' || data.message === 'Browser connected') {
             console.log('[ASRService] 浏览器已准备好')
             this.browserReady = true
-            window.message.success({ content: '语音识别浏览器已准备好', key: 'browser-status' })
+            // Use setTimeout to avoid calling the message API during render
+            setTimeout(() => {
+              window.message.success({ content: '语音识别浏览器已准备好', key: 'browser-status' })
+            }, 0)
           } else if (data.message === 'Browser disconnected' || data.message === 'Browser connection error') {
             console.log('[ASRService] 浏览器断开连接')
             this.browserReady = false
-            window.message.error({ content: '语音识别浏览器断开连接', key: 'browser-status' })
+            // Use setTimeout to avoid calling the message API during render
+            setTimeout(() => {
+              window.message.error({ content: '语音识别浏览器断开连接', key: 'browser-status' })
+            }, 0)
          } else if (data.message === 'stopped') {
             // Speech recognition stopped
             console.log('[ASRService] 语音识别已停止')
             this.isRecording = false
 
             // If no final result was received, show a processing-complete message
-            window.message.success({ content: i18n.t('settings.asr.completed'), key: 'asr-processing' })
+            // Use setTimeout to avoid calling the message API during render
+            setTimeout(() => {
+              window.message.success({ content: i18n.t('settings.asr.completed'), key: 'asr-processing' })
+            }, 0)
          } else if (data.message === 'reset_complete') {
             // Speech recognition has been reset
             console.log('[ASRService] 语音识别已强制重置')
@@ -136,7 +154,10 @@ class ASRService {
             this.resultCallback = null
 
             // Show a reset-complete message
-            window.message.info({ content: '语音识别已重置', key: 'asr-reset' })
+            // Use setTimeout to avoid calling the message API during render
+            setTimeout(() => {
+              window.message.info({ content: '语音识别已重置', key: 'asr-reset' })
+            }, 0)
 
             // If there is a callback, call it once with an empty string to reset the button state
             if (tempCallback && typeof tempCallback === 'function') {
@@ -169,7 +190,10 @@ class ASRService {
 
               // Invoke the callback
               tempCallback(data.data.text, true)
-              window.message.success({ content: i18n.t('settings.asr.success'), key: 'asr-processing' })
+              // Use setTimeout to avoid calling the message API during render
+              setTimeout(() => {
+                window.message.success({ content: i18n.t('settings.asr.success'), key: 'asr-processing' })
+              }, 0)
            } else if (this.isRecording) { // only handle interim results while recording
               // Not a final result; still invoke the callback, but mark it as non-final
               console.log('[ASRService] 收到中间结果,调用回调函数,文本:', data.data.text)
@@ -183,10 +207,13 @@ class ASRService {
             }
           } else if (data.type === 'error') {
             console.error('[ASRService] 收到错误消息:', data.message || data.data)
-            window.message.error({
-              content: `语音识别错误: ${data.message || data.data?.error || '未知错误'}`,
-              key: 'asr-error'
-            })
+            // Use setTimeout to avoid calling the message API during render
+            setTimeout(() => {
+              window.message.error({
+                content: `语音识别错误: ${data.message || data.data?.error || '未知错误'}`,
+                key: 'asr-error'
+              })
+            }, 0)
           }
         } catch (error) {
           console.error('[ASRService] 解析WebSocket消息失败:', error, event.data)
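Every notification change in ASRService (and in VoiceCallService below) follows the same recipe: the `window.message.*` call is deferred with `setTimeout(..., 0)` so the antd message API is never invoked synchronously from a render pass or from a WebSocket callback that fires during one. A hypothetical helper, not part of this commit, that would centralize the pattern; the shape of `window.message` is assumed from the calls in the diff:

type MessageKind = 'success' | 'error' | 'info' | 'loading'
type MessageArgs = { content: string; key?: string }

// Assumed shape of the app's global antd message instance, as used in the diff.
type GlobalMessage = Record<MessageKind, (args: MessageArgs) => void>

// Defer the notification to a macrotask so it never runs mid-render.
function deferMessage(kind: MessageKind, args: MessageArgs): void {
  setTimeout(() => {
    const msg = (window as unknown as { message: GlobalMessage }).message
    msg[kind](args)
  }, 0)
}

// Example matching one of the hunks above:
// deferMessage('loading', { content: '正在连接语音识别服务...', key: 'ws-connect' })

export { deferMessage }

The hand-written setTimeout blocks in the diff are equivalent; a helper like this would only avoid repeating the wrapper and its comment at every call site.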
@@ -525,10 +525,16 @@ class VoiceCallServiceClass {
       )
 
       // Notify the user via message
-      window.message.success({ content: '语音识别已完成,正在发送消息...', key: 'voice-call-send' })
+      // Use setTimeout to avoid calling the message API during render
+      setTimeout(() => {
+        window.message.success({ content: '语音识别已完成,正在发送消息...', key: 'voice-call-send' })
+      }, 0)
     } catch (error) {
       console.error('发送语音识别结果到聊天界面时出错:', error)
-      window.message.error({ content: '发送语音识别结果失败', key: 'voice-call-error' })
+      // Use setTimeout to avoid calling the message API during render
+      setTimeout(() => {
+        window.message.error({ content: '发送语音识别结果失败', key: 'voice-call-error' })
+      }, 0)
     }
 
     // Do not handle the response here; the chat UI handles it