From ac5e68f5a57fa736023e4f4598cbe18dd71f7cfd Mon Sep 17 00:00:00 2001
From: Marcus <1922576605@qq.com>
Date: Mon, 30 Jun 2025 10:49:24 +0800
Subject: [PATCH 1/2] =?UTF-8?q?feat:=20=E9=83=A8=E5=88=86=E8=AF=AD?=
=?UTF-8?q?=E9=9F=B3=E8=81=8A=E5=A4=A9=E5=8A=9F=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
backend/app/api/v1/endpoints/tts.py | 8 +-
.../app/api/v1/endpoints/websocket_service.py | 3 +-
web/src/components/tts.vue | 4 +-
web/src/layouts/BasicLayout.vue | 15 +-
web/src/router/index.ts | 15 +-
web/src/services/websocket.ts | 24 +-
web/src/stores/index.ts | 3 +-
web/src/stores/voice_store.ts | 293 ++++++++++++++++++
.../{CommunityView.vue => ChatLLMView.vue} | 0
web/src/views/VoiceView.vue | 254 +++++++++++++++
10 files changed, 594 insertions(+), 25 deletions(-)
create mode 100644 web/src/stores/voice_store.ts
rename web/src/views/{CommunityView.vue => ChatLLMView.vue} (100%)
create mode 100644 web/src/views/VoiceView.vue
diff --git a/backend/app/api/v1/endpoints/tts.py b/backend/app/api/v1/endpoints/tts.py
index e3462c8..687bcfc 100644
--- a/backend/app/api/v1/endpoints/tts.py
+++ b/backend/app/api/v1/endpoints/tts.py
@@ -9,7 +9,7 @@ from typing import Dict, Any, Optional as OptionalType
from app.constants.tts import APP_ID, TOKEN, SPEAKER
-# 协议常量保持不变...
+# 协议常量
PROTOCOL_VERSION = 0b0001
DEFAULT_HEADER_SIZE = 0b0001
FULL_CLIENT_REQUEST = 0b0001
@@ -35,7 +35,7 @@ EVENT_TTSSentenceEnd = 351
EVENT_TTSResponse = 352
-# 所有类定义保持不变...
+# 所有类定义
class Header:
def __init__(self,
protocol_version=PROTOCOL_VERSION,
@@ -93,7 +93,7 @@ class Response:
self.payload_json = None
-# 工具函数保持不变...
+# 工具函数
def gen_log_id():
"""生成logID"""
ts = int(time.time() * 1000)
@@ -191,7 +191,7 @@ async def send_event(ws, header, optional=None, payload=None):
await ws.send(full_client_request)
-# 修改:TTS状态管理类,添加消息ID和任务追踪
+# TTS状态管理类,添加消息ID和任务追踪
class TTSState:
def __init__(self, message_id: str):
self.message_id = message_id
diff --git a/backend/app/api/v1/endpoints/websocket_service.py b/backend/app/api/v1/endpoints/websocket_service.py
index 3e683ed..8d54c35 100644
--- a/backend/app/api/v1/endpoints/websocket_service.py
+++ b/backend/app/api/v1/endpoints/websocket_service.py
@@ -5,7 +5,6 @@ from aip import AipSpeech
from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
import json
-# 导入修改后的TTS模块
from . import tts
router = APIRouter()
@@ -62,7 +61,7 @@ async def websocket_online_count(websocket: WebSocket):
await websocket.send_json({"type": "asr_result", "result": asr_text})
temp_buffer = bytes()
- # 修改:TTS处理支持消息ID
+ # TTS处理
elif msg_type == "tts_text":
message_id = data.get("messageId")
text = data.get("text", "")
diff --git a/web/src/components/tts.vue b/web/src/components/tts.vue
index b7000e6..1e673bd 100644
--- a/web/src/components/tts.vue
+++ b/web/src/components/tts.vue
@@ -34,13 +34,15 @@ const handleClick = () => {
ttsStore.convertText(text, messageId);
}
};
-// 当文本改变时清理之前的音频
+
+// 文本改变清理之前的音频
watch(
() => text,
() => {
ttsStore.clearAudio(messageId);
}
);
+
onUnmounted(() => {
ttsStore.clearAudio(messageId);
});
diff --git a/web/src/layouts/BasicLayout.vue b/web/src/layouts/BasicLayout.vue
index d2c6c0d..0664ca1 100644
--- a/web/src/layouts/BasicLayout.vue
+++ b/web/src/layouts/BasicLayout.vue
@@ -45,7 +45,20 @@ const { hiddenLeftSidebar, simpleMode } = storeToRefs(layoutStore);
"
to="/"
>
- 聊天
+ 对话
+
+
+ 语音聊天
diff --git a/web/src/router/index.ts b/web/src/router/index.ts
index e4daaed..10ee4a9 100644
--- a/web/src/router/index.ts
+++ b/web/src/router/index.ts
@@ -2,7 +2,8 @@ import { createRouter, createWebHistory } from "vue-router";
import BasicLayout from "@/layouts/BasicLayout.vue";
import { resetDescription, setTitle } from "@/utils";
-import community from "@/views/CommunityView.vue";
+import ChatLLM from "@/views/ChatLLMView.vue";
+import VoiceView from "@/views/VoiceView.vue";
const router = createRouter({
history: createWebHistory(import.meta.env.BASE_URL),
@@ -13,11 +14,19 @@ const router = createRouter({
children: [
{
path: "",
- name: "community",
- component: community,
+ name: "ChatLLM",
+ component: ChatLLM,
meta: {
title: "对话"
}
+ },
+ {
+ path: "/voice",
+ name: "Voice",
+ component: VoiceView,
+ meta: {
+ title: "语音对话"
+ }
}
]
}
diff --git a/web/src/services/websocket.ts b/web/src/services/websocket.ts
index 489ec21..115ba05 100644
--- a/web/src/services/websocket.ts
+++ b/web/src/services/websocket.ts
@@ -1,4 +1,4 @@
-import { useChatStore, useTtsStore } from "@/stores";
+import { useChatStore, useTtsStore,useVoiceStore } from "@/stores";
// WebSocket
export const useWebSocketStore = defineStore("websocket", () => {
@@ -6,6 +6,8 @@ export const useWebSocketStore = defineStore("websocket", () => {
const connected = ref(false);
const chatStore = useChatStore();
const ttsStore = useTtsStore();
+ const voiceStore = useVoiceStore();
+ const router = useRouter();
const { onlineCount } = storeToRefs(chatStore);
@@ -31,7 +33,14 @@ export const useWebSocketStore = defineStore("websocket", () => {
onlineCount.value = data.online_count;
break;
case "asr_result":
- chatStore.addMessageToHistory(data.result);
+ if (router.currentRoute.value.path === "/") {
+ chatStore.addMessageToHistory(data.result);
+ } else if (router.currentRoute.value.path === "/voice") {
+ // 在语音页面,使用VoiceStore处理
+ voiceStore.handleASRResult(data.result);
+ } else {
+ console.warn(data);
+ }
break;
// 新的TTS消息格式处理
@@ -76,7 +85,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
ttsStore.finishConversion(data.messageId);
} else {
console.log("TTS音频传输完成(无messageId)");
- // 兜底处理,可能是旧格式
ttsStore.finishConversion(data.messageId);
}
break;
@@ -85,7 +93,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
// TTS会话结束
if (data.messageId) {
console.log(`TTS会话结束 [${data.messageId}]`);
- // 可以添加额外的清理逻辑
} else {
console.log("TTS会话结束");
}
@@ -98,19 +105,10 @@ export const useWebSocketStore = defineStore("websocket", () => {
ttsStore.handleError(data.message, data.messageId);
} else {
console.error("TTS错误:", data.message);
- // 兜底处理,可能是旧格式
ttsStore.handleError(data.message, data.messageId || "unknown");
}
break;
- // 保留旧的消息类型作为兜底处理
- case "tts_audio_complete_legacy":
- case "tts_complete_legacy":
- case "tts_error_legacy":
- console.log("收到旧格式TTS消息:", data.type);
- // 可以选择处理或忽略
- break;
-
default:
console.log("未知消息类型:", data.type, data);
}
diff --git a/web/src/stores/index.ts b/web/src/stores/index.ts
index 9647e3d..e40c7b1 100644
--- a/web/src/stores/index.ts
+++ b/web/src/stores/index.ts
@@ -1,4 +1,5 @@
export * from "./asr_store";
export * from "./chat_store";
export * from "./layout_store";
-export * from "./tts_store";
\ No newline at end of file
+export * from "./tts_store";
+export * from "./voice_store";
diff --git a/web/src/stores/voice_store.ts b/web/src/stores/voice_store.ts
new file mode 100644
index 0000000..442607e
--- /dev/null
+++ b/web/src/stores/voice_store.ts
@@ -0,0 +1,293 @@
+import { useWebSocketStore } from "@/services";
+import { useChatStore, useTtsStore } from "@/stores";
+
+export const useVoiceStore = defineStore("voice", () => {
+ // 状态管理
+ const isListening = ref(false); // 是否正在监听语音输入
+ const isProcessing = ref(false); // 是否正在处理(包括ASR、LLM、TTS全流程)
+ const currentSessionId = ref<string | null>(null); // 当前会话ID
+
+ // 依赖的其他store
+ const chatStore = useChatStore();
+ const ttsStore = useTtsStore();
+ const wsStore = useWebSocketStore();
+
+ // 语音消息历史
+ const voiceMessages = ref<
+ {
+ id: string;
+ type: "user" | "assistant";
+ text: string;
+ audioId?: string;
+ timestamp: number;
+ isProcessing?: boolean;
+ }[]
+ >([]);
+
+ // ASR缓冲区状态
+ const isRecording = ref(false);
+ const recordingStartTime = ref<number | null>(null);
+ const recordingMaxDuration = 60 * 1000; // 最大录音时长 60 秒
+
+ /**
+ * 开始语音输入
+ */
+ const startListening = async () => {
+ if (isListening.value) return;
+
+ try {
+ await wsStore.connect();
+
+ // 创建新的会话ID
+ currentSessionId.value = new Date().getTime().toString();
+ isListening.value = true;
+ isRecording.value = true;
+ recordingStartTime.value = Date.now();
+
+ // 开始录音 - 假设我们有一个 startRecording 方法
+ // 这里通常会调用浏览器的 MediaRecorder API
+ await startRecording();
+
+ console.log("开始语音输入");
+ } catch (error) {
+ console.error("启动语音输入失败:", error);
+ stopListening();
+ }
+ };
+
+ /**
+ * 停止语音输入
+ */
+ const stopListening = async () => {
+ if (!isListening.value) return;
+
+ try {
+ // 停止录音
+ if (isRecording.value) {
+ await stopRecording();
+ isRecording.value = false;
+ }
+
+ isListening.value = false;
+ recordingStartTime.value = null;
+
+ // 发送结束信号
+ wsStore.send(JSON.stringify({ type: "asr_end" }));
+ console.log("停止语音输入,等待ASR结果");
+ } catch (error) {
+ console.error("停止语音输入失败:", error);
+ }
+ };
+
+ /**
+ * 录音时间检查
+ */
+ const checkRecordingTime = () => {
+ if (isRecording.value && recordingStartTime.value) {
+ const currentTime = Date.now();
+ const duration = currentTime - recordingStartTime.value;
+
+ if (duration >= recordingMaxDuration) {
+ console.log("录音达到最大时长,自动停止");
+ stopListening();
+ }
+ }
+ };
+
+ // 定时检查录音时间
+ let recordingTimer: any = null;
+ watch(isRecording, (newVal) => {
+ if (newVal) {
+ recordingTimer = setInterval(checkRecordingTime, 1000);
+ } else if (recordingTimer) {
+ clearInterval(recordingTimer);
+ recordingTimer = null;
+ }
+ });
+
+ /**
+ * 处理ASR结果
+ */
+ const handleASRResult = async (text: string) => {
+ if (!text.trim()) return;
+
+ console.log("收到ASR结果:", text);
+ isProcessing.value = true;
+
+ // 添加用户消息
+ const userMessageId = new Date().getTime().toString();
+ voiceMessages.value.push({
+ id: userMessageId,
+ type: "user",
+ text,
+ timestamp: Date.now()
+ });
+
+ // 添加助手消息占位
+ const assistantMessageId = new Date().getTime().toString();
+ voiceMessages.value.push({
+ id: assistantMessageId,
+ type: "assistant",
+ text: "",
+ timestamp: Date.now(),
+ isProcessing: true
+ });
+
+ // 调用LLM生成回复
+ await generateLLMResponse(text, assistantMessageId);
+ };
+
+ /**
+ * 生成LLM回复
+ */
+ const generateLLMResponse = async (userInput: string, responseId: string) => {
+ try {
+ console.log("生成LLM回复...");
+
+ // 构建消息历史
+ const messages = [
+ ...voiceMessages.value
+ .filter((msg) => !msg.isProcessing)
+ .map((msg) => ({
+ role: msg.type === "user" ? "user" : "assistant",
+ content: msg.text
+ })),
+ { role: "user", content: userInput }
+ ];
+
+ let responseText = "";
+
+ // 调用ChatStore的聊天方法
+ await chatStore.chatWithLLM(
+ {
+ messages,
+ model: chatStore.modelInfo?.model_id || ""
+ },
+ // 处理流式回复
+ (content) => {
+ responseText = content;
+ // 更新助手消息
+ const index = voiceMessages.value.findIndex(
+ (msg) => msg.id === responseId
+ );
+ if (index !== -1) {
+ voiceMessages.value[index].text = content;
+ }
+ }
+ );
+
+ // LLM生成完成,转换为语音
+ console.log("LLM回复生成完成:", responseText);
+ await synthesizeSpeech(responseText, responseId);
+ } catch (error) {
+ console.error("生成LLM回复失败:", error);
+ const index = voiceMessages.value.findIndex(
+ (msg) => msg.id === responseId
+ );
+ if (index !== -1) {
+ voiceMessages.value[index].text = "抱歉,生成回复时出错";
+ voiceMessages.value[index].isProcessing = false;
+ }
+ isProcessing.value = false;
+ }
+ };
+
+ /**
+ * 转换文本为语音
+ */
+ const synthesizeSpeech = async (text: string, messageId: string) => {
+ try {
+ console.log("转换文本为语音...");
+
+ // 调用TTS生成语音
+ await ttsStore.convertText(text, messageId);
+
+ // 注意:TTS音频生成完成后会自动播放
+ // 这部分逻辑在TTS Store的finishConversion方法中处理
+
+ // 更新消息状态
+ const index = voiceMessages.value.findIndex(
+ (msg) => msg.id === messageId
+ );
+ if (index !== -1) {
+ voiceMessages.value[index].audioId = messageId;
+ voiceMessages.value[index].isProcessing = false;
+ }
+ } catch (error) {
+ console.error("转换文本为语音失败:", error);
+ const index = voiceMessages.value.findIndex(
+ (msg) => msg.id === messageId
+ );
+ if (index !== -1) {
+ voiceMessages.value[index].isProcessing = false;
+ }
+ } finally {
+ isProcessing.value = false;
+ }
+ };
+
+ /**
+ * 清除所有消息
+ */
+ const clearMessages = () => {
+ voiceMessages.value = [];
+ };
+
+ /**
+ * 播放指定消息的语音
+ */
+ const playMessageAudio = async (messageId: string) => {
+ const message = voiceMessages.value.find((msg) => msg.id === messageId);
+ if (message && message.audioId) {
+ await ttsStore.play(message.audioId);
+ }
+ };
+
+ /**
+ * 暂停当前播放的语音
+ */
+ const pauseAudio = () => {
+ ttsStore.pauseAll();
+ };
+
+ // 录音相关方法 - 这里需要根据实际情况实现
+ // 通常会使用MediaRecorder API
+ const startRecording = async () => {
+ // 实现录音开始逻辑
+ // 1. 获取麦克风权限
+ // 2. 创建MediaRecorder
+ // 3. 监听数据可用事件,发送到WebSocket
+ console.log("开始录音...");
+ };
+
+ const stopRecording = async () => {
+ // 实现录音停止逻辑
+ console.log("停止录音...");
+ };
+
+ // 在组件卸载时清理资源
+ onUnmounted(() => {
+ if (isRecording.value) {
+ stopRecording();
+ }
+ if (recordingTimer) {
+ clearInterval(recordingTimer);
+ }
+ });
+
+ return {
+ // 状态
+ isListening,
+ isProcessing,
+ isRecording,
+ voiceMessages,
+
+ // 方法
+ startListening,
+ stopListening,
+ handleASRResult,
+ clearMessages,
+ playMessageAudio,
+ pauseAudio
+ };
+});
diff --git a/web/src/views/CommunityView.vue b/web/src/views/ChatLLMView.vue
similarity index 100%
rename from web/src/views/CommunityView.vue
rename to web/src/views/ChatLLMView.vue
diff --git a/web/src/views/VoiceView.vue b/web/src/views/VoiceView.vue
new file mode 100644
index 0000000..3d3ee1d
--- /dev/null
+++ b/web/src/views/VoiceView.vue
@@ -0,0 +1,254 @@
+
+
+
+
+
+
+
+
+
+
+
+ 助手:
+ 你好,我是你的智能助手,请问有什么可以帮助你的吗?
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
{{
+ msg.role === "user" ? "你:" : "助手:"
+ }}
+
+
+ Tokens: {{ msg.usage?.total_tokens }}
+
+
+
+ handleItemHeaderClick(getName(msg, idx))
+ "
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 正在语音输入...
+
+
+
+
+
+
+
+
+ {}"
+ >
+
+
+
+
+
+ 清除历史
+
+
+
+ 确定要清除历史消息吗?
+
+
+
+
+ {{ isRecording ? "停止输入" : "语音输入" }}
+
+
+
+
+
+
+
From ac549bd939ba11e48b73146e7ca77e6ef358ba48 Mon Sep 17 00:00:00 2001
From: Marcus <1922576605@qq.com>
Date: Tue, 1 Jul 2025 00:00:31 +0800
Subject: [PATCH 2/2] =?UTF-8?q?feat:=20=E5=AE=8C=E6=88=90=E5=A4=A7?=
=?UTF-8?q?=E9=83=A8=E5=88=86=E5=8A=9F=E8=83=BD=E5=BC=80=E5=8F=91?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
backend/app/api/v1/endpoints/tts.py | 3 +-
.../api/v1/endpoints/voice_conversation.py | 105 +++++++
.../app/api/v1/endpoints/websocket_service.py | 10 +-
web/components.d.ts | 1 +
web/eslint.config.js | 3 +-
web/src/assets/Icons/index.ts | 1 +
.../svg/heroicons/DocumentDuplicateIcon.svg | 3 +
web/src/interfaces/chat_service.ts | 1 +
web/src/services/websocket.ts | 16 +-
web/src/stores/asr_store.ts | 15 +-
web/src/stores/chat_store.ts | 150 ++++-----
web/src/stores/index.ts | 1 -
web/src/stores/voice_store.ts | 293 ------------------
web/src/utils/clipboard.ts | 23 ++
web/src/utils/index.ts | 1 +
web/src/views/ChatLLMView.vue | 25 +-
web/src/views/VoiceView.vue | 9 +-
17 files changed, 278 insertions(+), 382 deletions(-)
create mode 100644 backend/app/api/v1/endpoints/voice_conversation.py
create mode 100644 web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg
delete mode 100644 web/src/stores/voice_store.ts
create mode 100644 web/src/utils/clipboard.ts
diff --git a/backend/app/api/v1/endpoints/tts.py b/backend/app/api/v1/endpoints/tts.py
index 687bcfc..6caafeb 100644
--- a/backend/app/api/v1/endpoints/tts.py
+++ b/backend/app/api/v1/endpoints/tts.py
@@ -374,8 +374,9 @@ async def process_tts_task(websocket, message_id: str, text: str):
elif res.optional.event == EVENT_TTSResponse:
audio_count += 1
print(f"发送音频数据 [{message_id}] #{audio_count},大小: {len(res.payload)}")
- # 发送音频数据,包含消息ID
+ # 发送音频数据
await websocket.send_json({
+ "id": audio_count,
"type": "tts_audio_data",
"messageId": message_id,
"audioData": res.payload.hex() # 转为hex字符串
diff --git a/backend/app/api/v1/endpoints/voice_conversation.py b/backend/app/api/v1/endpoints/voice_conversation.py
new file mode 100644
index 0000000..3501091
--- /dev/null
+++ b/backend/app/api/v1/endpoints/voice_conversation.py
@@ -0,0 +1,105 @@
+import json
+import aiohttp
+import asyncio
+from fastapi.encoders import jsonable_encoder
+from starlette.websockets import WebSocket
+
+from . import tts
+from app.constants.model_data import tip_message, base_url, headers
+
+
+async def process_voice_conversation(websocket: WebSocket, asr_text: str, message_id: str):
+ try:
+ print(f"开始处理语音对话 [{message_id}]: {asr_text}")
+
+ # 1. 发送ASR识别结果到前端
+ await websocket.send_json({
+ "type": "asr_result",
+ "messageId": message_id,
+ "result": asr_text
+ })
+
+ # 2. 构建LLM请求
+ messages = [
+ tip_message,
+ {"role": "user", "content": asr_text}
+ ]
+ payload = {
+ "model": "gpt-4o",
+ "messages": messages,
+ "stream": True
+ }
+
+ print(f"发送LLM请求 [{message_id}]: {json.dumps(payload, ensure_ascii=False)}")
+
+ # 3. 流式处理LLM响应
+ full_response = ""
+ llm_completed = False
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ base_url,
+ headers=headers,
+ json=jsonable_encoder(payload)
+ ) as resp:
+ if resp.status != 200:
+ error_text = await resp.text()
+ raise Exception(f"LLM API请求失败: {resp.status} - {error_text}")
+
+ # 读取流式响应
+ async for line in resp.content:
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ data = line[6:].strip()
+ if data == '[DONE]':
+ llm_completed = True
+ print(f"LLM响应完成 [{message_id}]")
+ break
+
+ try:
+ result = json.loads(data)
+ # 提取内容
+ choices = result.get("choices", [])
+ if not choices:
+ # 跳过空choices数据包
+ continue
+
+ delta = choices[0].get("delta", {})
+ content = delta.get("content")
+
+ if content:
+ full_response += content
+
+ except json.JSONDecodeError as e:
+ print(f"JSON解析错误 [{message_id}]: {e}, 数据: {data}")
+ continue
+ except Exception as e:
+ print(f"处理数据包异常 [{message_id}]: {e}, 数据: {data}")
+ continue
+
+ # 4. LLM生成完成后,启动完整的TTS处理
+ if llm_completed and full_response:
+ print(f"LLM生成完成 [{message_id}], 总内容长度: {len(full_response)}")
+ print(f"完整内容: {full_response}")
+
+ # 发送完成消息
+ await websocket.send_json({
+ "type": "llm_complete_response",
+ "messageId": message_id,
+ "content": full_response
+ })
+
+ # 启动TTS处理完整内容
+ print(f"启动完整TTS处理 [{message_id}]: {full_response}")
+ await tts.handle_tts_text(websocket, message_id, full_response)
+
+ except Exception as e:
+ print(f"语音对话处理异常 [{message_id}]: {e}")
+ import traceback
+ traceback.print_exc()
+ await websocket.send_json({
+ "type": "voice_conversation_error",
+ "messageId": message_id,
+ "message": f"处理失败: {str(e)}"
+ })
diff --git a/backend/app/api/v1/endpoints/websocket_service.py b/backend/app/api/v1/endpoints/websocket_service.py
index 8d54c35..d6d32f5 100644
--- a/backend/app/api/v1/endpoints/websocket_service.py
+++ b/backend/app/api/v1/endpoints/websocket_service.py
@@ -1,4 +1,6 @@
# websocket_service.py
+import uuid
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from typing import Set
from aip import AipSpeech
@@ -6,6 +8,7 @@ from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
import json
from . import tts
+from .voice_conversation import process_voice_conversation
router = APIRouter()
active_connections: Set[WebSocket] = set()
@@ -58,7 +61,12 @@ async def websocket_online_count(websocket: WebSocket):
elif msg_type == "asr_end":
asr_text = await asr_buffer(temp_buffer)
- await websocket.send_json({"type": "asr_result", "result": asr_text})
+ # 从data中获取messageId,如果不存在则生成一个新的ID
+ message_id = data.get("messageId", "voice_" + str(uuid.uuid4()))
+ if data.get("voiceConversation"):
+ await process_voice_conversation(websocket, asr_text, message_id)
+ else:
+ await websocket.send_json({"type": "asr_result", "result": asr_text})
temp_buffer = bytes()
# TTS处理
diff --git a/web/components.d.ts b/web/components.d.ts
index ddc6cfc..3400cc8 100644
--- a/web/components.d.ts
+++ b/web/components.d.ts
@@ -21,6 +21,7 @@ declare module 'vue' {
NPopover: typeof import('naive-ui')['NPopover']
NScrollbar: typeof import('naive-ui')['NScrollbar']
NSelect: typeof import('naive-ui')['NSelect']
+ NSpin: typeof import('naive-ui')['NSpin']
NTag: typeof import('naive-ui')['NTag']
RouterLink: typeof import('vue-router')['RouterLink']
RouterView: typeof import('vue-router')['RouterView']
diff --git a/web/eslint.config.js b/web/eslint.config.js
index be6c010..553e2a1 100644
--- a/web/eslint.config.js
+++ b/web/eslint.config.js
@@ -32,6 +32,7 @@ export default antfu({
"ts/no-unsafe-function-type": "off",
"no-console": "off",
"unused-imports/no-unused-vars": "warn",
- "ts/no-use-before-define": "off"
+ "ts/no-use-before-define": "off",
+ "vue/operator-linebreak": "off",
}
});
diff --git a/web/src/assets/Icons/index.ts b/web/src/assets/Icons/index.ts
index 409197d..765b451 100644
--- a/web/src/assets/Icons/index.ts
+++ b/web/src/assets/Icons/index.ts
@@ -1,4 +1,5 @@
export { default as ChevronLeftIcon } from "./svg/heroicons/ChevronLeftIcon.svg?component";
+export { default as DocumentDuplicateIcon } from "./svg/heroicons/DocumentDuplicateIcon.svg?component";
export { default as ExclamationTriangleIcon } from "./svg/heroicons/ExclamationTriangleIcon.svg?component";
export { default as microphone } from "./svg/heroicons/MicrophoneIcon.svg?component";
export { default as PaperAirplaneIcon } from "./svg/heroicons/PaperAirplaneIcon.svg?component";
diff --git a/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg b/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg
new file mode 100644
index 0000000..bf48f9f
--- /dev/null
+++ b/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg
@@ -0,0 +1,3 @@
+
diff --git a/web/src/interfaces/chat_service.ts b/web/src/interfaces/chat_service.ts
index 94e2f51..2724c15 100644
--- a/web/src/interfaces/chat_service.ts
+++ b/web/src/interfaces/chat_service.ts
@@ -12,6 +12,7 @@ export interface Message {
role?: string;
usage?: UsageInfo;
id?: string;
+ type?: 'chat' | 'voice';
[property: string]: any;
}
diff --git a/web/src/services/websocket.ts b/web/src/services/websocket.ts
index 115ba05..78fd4c8 100644
--- a/web/src/services/websocket.ts
+++ b/web/src/services/websocket.ts
@@ -1,4 +1,4 @@
-import { useChatStore, useTtsStore,useVoiceStore } from "@/stores";
+import { useChatStore, useTtsStore } from "@/stores";
// WebSocket
export const useWebSocketStore = defineStore("websocket", () => {
@@ -6,7 +6,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
const connected = ref(false);
const chatStore = useChatStore();
const ttsStore = useTtsStore();
- const voiceStore = useVoiceStore();
const router = useRouter();
const { onlineCount } = storeToRefs(chatStore);
@@ -16,13 +15,11 @@ export const useWebSocketStore = defineStore("websocket", () => {
if (e.data instanceof ArrayBuffer) {
// 处理二进制音频数据(兜底处理,新版本应该不会用到)
console.log("收到二进制音频数据,大小:", e.data.byteLength);
- console.warn("收到旧格式的二进制数据,无法确定messageId");
// 可以选择忽略或者作为兜底处理
} else if (e.data instanceof Blob) {
// 如果是Blob,转换为ArrayBuffer(兜底处理)
e.data.arrayBuffer().then((buffer: ArrayBuffer) => {
console.log("收到Blob音频数据,大小:", buffer.byteLength);
- console.warn("收到旧格式的Blob数据,无法确定messageId");
});
} else if (typeof e.data === "string") {
// 处理文本JSON消息
@@ -36,8 +33,8 @@ export const useWebSocketStore = defineStore("websocket", () => {
if (router.currentRoute.value.path === "/") {
chatStore.addMessageToHistory(data.result);
} else if (router.currentRoute.value.path === "/voice") {
- // 在语音页面,使用VoiceStore处理
- voiceStore.handleASRResult(data.result);
+ // 在语音页面的处理
+ chatStore.addMessageToHistory(data.result, "user", "voice");
} else {
console.warn(data);
}
@@ -109,6 +106,13 @@ export const useWebSocketStore = defineStore("websocket", () => {
}
break;
+ case "llm_complete_response":
+ // LLM部分响应
+ if (router.currentRoute.value.path === "/voice") {
+ chatStore.addMessageToHistory(data.content, "assistant", "voice");
+ }
+ break;
+
default:
console.log("未知消息类型:", data.type, data);
}
diff --git a/web/src/stores/asr_store.ts b/web/src/stores/asr_store.ts
index f5b6b76..39a7fd4 100644
--- a/web/src/stores/asr_store.ts
+++ b/web/src/stores/asr_store.ts
@@ -11,8 +11,8 @@ export const useAsrStore = defineStore("asr", () => {
let mediaStreamSource: MediaStreamAudioSourceNode | null = null;
let workletNode: AudioWorkletNode | null = null;
- // 获取 WebSocket store 实例
const webSocketStore = useWebSocketStore();
+ const router = useRouter();
/**
* 发送消息到 WebSocket
@@ -81,16 +81,19 @@ export const useAsrStore = defineStore("asr", () => {
const processorUrl = URL.createObjectURL(blob);
// 加载AudioWorklet模块
await audioContext.audioWorklet.addModule(processorUrl);
- // 释放URL对象(防止内存泄漏)
+ // 释放URL对象防止内存泄漏
URL.revokeObjectURL(processorUrl);
+
// 创建音频源节点
mediaStreamSource = audioContext.createMediaStreamSource(stream);
+
// 创建AudioWorkletNode
workletNode = new AudioWorkletNode(audioContext, "audio-processor", {
numberOfInputs: 1,
numberOfOutputs: 1,
channelCount: 1
});
+
// 监听来自AudioWorklet的音频数据
workletNode.port.onmessage = (event) => {
if (event.data.type === "audiodata") {
@@ -116,8 +119,14 @@ export const useAsrStore = defineStore("asr", () => {
const stopRecording = () => {
if (!isRecording.value) return;
+ const messageId = `voice_${Date.now()}`;
// 通知后端录音结束
- sendMessage(JSON.stringify({ type: "asr_end" }));
+ const msg: Record<string, any> = { type: "asr_end" };
+ if (router.currentRoute.value.path === "/voice") {
+ msg.messageId = messageId;
+ msg.voiceConversation = true;
+ }
+ sendMessage(JSON.stringify(msg));
// 停止所有音轨
if (mediaStreamSource?.mediaStream) {
diff --git a/web/src/stores/chat_store.ts b/web/src/stores/chat_store.ts
index 8392837..51dfb3e 100644
--- a/web/src/stores/chat_store.ts
+++ b/web/src/stores/chat_store.ts
@@ -7,7 +7,9 @@ import type {
import { ChatService } from "@/services";
export const useChatStore = defineStore("chat", () => {
+ const router = useRouter();
const token = "sk-fkGVZBrAqvIxLjlF3b5f19EfBb63486c90Fa5a1fBd7076Ee";
+
// 默认模型
const modelInfo = ref(null);
// 历史消息
@@ -16,32 +18,35 @@ export const useChatStore = defineStore("chat", () => {
const completing = ref(false);
// 是否正在思考
const thinking = ref(false);
+ // 模型列表
+ const modelList = ref([]);
// 在线人数
const onlineCount = ref(0);
+ // 生成消息ID方法
+ const generateMessageId = () => new Date().getTime().toString();
+
+ // 获取最后一条消息
+ const getLastMessage = () =>
+ historyMessages.value[historyMessages.value.length - 1];
+
// 与 LLM 聊天
const chatWithLLM = async (
request: IChatWithLLMRequest,
- onProgress: (content: string) => void, // 接收内容进度回调
- getUsageInfo: (object: UsageInfo) => void = () => {}, // 接收使用信息回调
- getThinking: (thinkingContent: string) => void = () => {} // 接收思维链内容回调
+ onProgress: (content: string) => void,
+ getUsageInfo: (object: UsageInfo) => void = () => {},
+ getThinking: (thinkingContent: string) => void = () => {}
) => {
if (completing.value) throw new Error("正在响应中");
- completing.value = true; // 开始请求
+ completing.value = true;
try {
await ChatService.ChatWithLLM(
token,
request,
- (content) => {
- onProgress(content);
- },
- (object: UsageInfo) => {
- getUsageInfo(object);
- },
- (thinkingContent: string) => {
- getThinking(thinkingContent);
- }
+ onProgress,
+ getUsageInfo,
+ getThinking
);
} catch (error) {
console.error("请求失败:", error);
@@ -51,28 +56,33 @@ export const useChatStore = defineStore("chat", () => {
};
// 添加消息到历史记录
- const addMessageToHistory = (message: string) => {
+ const addMessageToHistory = (
+ message: string,
+ role: "user" | "assistant" = "user",
+ type: "chat" | "voice" = "chat"
+ ) => {
const content = message.trim();
if (!content) return;
historyMessages.value.push({
- role: "user",
- content
+ role,
+ content,
+ type,
+ id: generateMessageId()
});
};
// 清除历史消息
- const clearHistoryMessages = () => {
- historyMessages.value = [];
+ const clearHistoryMessages = (type?: "chat" | "voice") => {
+ historyMessages.value = type
+ ? historyMessages.value.filter((msg) => msg.type !== type)
+ : [];
};
- // 确保最后一条消息是助手消息,如果最后一条消息不是,就加一条空的占位,不然后面的思维链会丢失
+ // 确保最后一条消息是助手消息
const ensureAssistantMessage = () => {
- if (
- historyMessages.value.length === 0 ||
- historyMessages.value[historyMessages.value.length - 1].role !==
- "assistant"
- ) {
+ const lastMessage = getLastMessage();
+ if (!lastMessage || lastMessage.role !== "assistant") {
historyMessages.value.push({
role: "assistant",
content: ""
@@ -80,57 +90,57 @@ export const useChatStore = defineStore("chat", () => {
}
};
+ // 处理聊天响应的逻辑
+ const handleChatResponse = async (
+ messages: IChatWithLLMRequest["messages"]
+ ) => {
+ if (!modelInfo.value) return;
+
+ // 过滤出type为chat的聊天消息
+ const filteredMessages = computed(() =>
+ messages.filter((msg) => msg.type === "chat" || !msg.type)
+ );
+
+ await chatWithLLM(
+ { messages: filteredMessages.value, model: modelInfo.value.model_id },
+ // 处理文本内容
+ (content) => {
+ ensureAssistantMessage();
+ thinking.value = false;
+ getLastMessage().content = content;
+ },
+ // 处理使用信息
+ (usageInfo: UsageInfo) => {
+ const lastMessage = getLastMessage();
+ if (lastMessage?.role === "assistant") {
+ lastMessage.usage = usageInfo;
+ }
+ },
+ // 处理思维链
+ (thinkingContent: string) => {
+ ensureAssistantMessage();
+ thinking.value = true;
+ getLastMessage().thinking = thinkingContent;
+ }
+ );
+
+ // 设置消息ID
+ getLastMessage().id = generateMessageId();
+ };
+
watch(
historyMessages,
(newVal) => {
- // 当历史消息变化时,发送请求
- if (newVal.length > 0) {
+ if (newVal.length > 0 && router.currentRoute.value.path === "/") {
const lastMessage = newVal[newVal.length - 1];
- if (lastMessage.role === "user" && modelInfo.value) {
- chatWithLLM(
- {
- messages: newVal,
- model: modelInfo.value?.model_id
- },
- // 处理进度回调,文本
- (content) => {
- ensureAssistantMessage();
- thinking.value = false;
- historyMessages.value[historyMessages.value.length - 1].content =
- content;
- },
- // 处理使用usage信息回调
- (usageInfo: UsageInfo) => {
- // 如果最后一条消息是助手的回复,则更新使用信息
- if (
- historyMessages.value.length > 0 &&
- historyMessages.value[historyMessages.value.length - 1].role ===
- "assistant"
- ) {
- historyMessages.value[historyMessages.value.length - 1].usage =
- usageInfo;
- }
- },
- // 处理思维链内容回调
- (thinkingContent: string) => {
- ensureAssistantMessage();
- thinking.value = true;
- historyMessages.value[historyMessages.value.length - 1].thinking =
- thinkingContent;
- }
- ).then(() => {
- historyMessages.value[historyMessages.value.length - 1].id =
- new Date().getTime().toString();
- });
+ if (lastMessage.role === "user") {
+ handleChatResponse(newVal);
}
}
},
{ deep: true }
);
- // 模型列表
- const modelList = ref([]);
-
// 获取模型列表
const getModelList = async () => {
try {
@@ -144,14 +154,14 @@ export const useChatStore = defineStore("chat", () => {
return {
token,
completing,
- chatWithLLM,
+ thinking,
+ modelInfo,
+ modelList,
historyMessages,
+ chatWithLLM,
addMessageToHistory,
clearHistoryMessages,
getModelList,
- modelList,
- modelInfo,
- onlineCount,
- thinking
+ onlineCount
};
});
diff --git a/web/src/stores/index.ts b/web/src/stores/index.ts
index e40c7b1..ef06519 100644
--- a/web/src/stores/index.ts
+++ b/web/src/stores/index.ts
@@ -2,4 +2,3 @@ export * from "./asr_store";
export * from "./chat_store";
export * from "./layout_store";
export * from "./tts_store";
-export * from "./voice_store";
diff --git a/web/src/stores/voice_store.ts b/web/src/stores/voice_store.ts
deleted file mode 100644
index 442607e..0000000
--- a/web/src/stores/voice_store.ts
+++ /dev/null
@@ -1,293 +0,0 @@
-import { useWebSocketStore } from "@/services";
-import { useChatStore, useTtsStore } from "@/stores";
-
-export const useVoiceStore = defineStore("voice", () => {
- // 状态管理
- const isListening = ref(false); // 是否正在监听语音输入
- const isProcessing = ref(false); // 是否正在处理(包括ASR、LLM、TTS全流程)
- const currentSessionId = ref(null); // 当前会话ID
-
- // 依赖的其他store
- const chatStore = useChatStore();
- const ttsStore = useTtsStore();
- const wsStore = useWebSocketStore();
-
- // 语音消息历史
- const voiceMessages = ref<
- {
- id: string;
- type: "user" | "assistant";
- text: string;
- audioId?: string;
- timestamp: number;
- isProcessing?: boolean;
- }[]
- >([]);
-
- // ASR缓冲区状态
- const isRecording = ref(false);
- const recordingStartTime = ref(null);
- const recordingMaxDuration = 60 * 1000; // 最大录音时长 60 秒
-
- /**
- * 开始语音输入
- */
- const startListening = async () => {
- if (isListening.value) return;
-
- try {
- await wsStore.connect();
-
- // 创建新的会话ID
- currentSessionId.value = new Date().getTime().toString();
- isListening.value = true;
- isRecording.value = true;
- recordingStartTime.value = Date.now();
-
- // 开始录音 - 假设我们有一个 startRecording 方法
- // 这里通常会调用浏览器的 MediaRecorder API
- await startRecording();
-
- console.log("开始语音输入");
- } catch (error) {
- console.error("启动语音输入失败:", error);
- stopListening();
- }
- };
-
- /**
- * 停止语音输入
- */
- const stopListening = async () => {
- if (!isListening.value) return;
-
- try {
- // 停止录音
- if (isRecording.value) {
- await stopRecording();
- isRecording.value = false;
- }
-
- isListening.value = false;
- recordingStartTime.value = null;
-
- // 发送结束信号
- wsStore.send(JSON.stringify({ type: "asr_end" }));
- console.log("停止语音输入,等待ASR结果");
- } catch (error) {
- console.error("停止语音输入失败:", error);
- }
- };
-
- /**
- * 录音时间检查
- */
- const checkRecordingTime = () => {
- if (isRecording.value && recordingStartTime.value) {
- const currentTime = Date.now();
- const duration = currentTime - recordingStartTime.value;
-
- if (duration >= recordingMaxDuration) {
- console.log("录音达到最大时长,自动停止");
- stopListening();
- }
- }
- };
-
- // 定时检查录音时间
- let recordingTimer: any = null;
- watch(isRecording, (newVal) => {
- if (newVal) {
- recordingTimer = setInterval(checkRecordingTime, 1000);
- } else if (recordingTimer) {
- clearInterval(recordingTimer);
- recordingTimer = null;
- }
- });
-
- /**
- * 处理ASR结果
- */
- const handleASRResult = async (text: string) => {
- if (!text.trim()) return;
-
- console.log("收到ASR结果:", text);
- isProcessing.value = true;
-
- // 添加用户消息
- const userMessageId = new Date().getTime().toString();
- voiceMessages.value.push({
- id: userMessageId,
- type: "user",
- text,
- timestamp: Date.now()
- });
-
- // 添加助手消息占位
- const assistantMessageId = new Date().getTime().toString();
- voiceMessages.value.push({
- id: assistantMessageId,
- type: "assistant",
- text: "",
- timestamp: Date.now(),
- isProcessing: true
- });
-
- // 调用LLM生成回复
- await generateLLMResponse(text, assistantMessageId);
- };
-
- /**
- * 生成LLM回复
- */
- const generateLLMResponse = async (userInput: string, responseId: string) => {
- try {
- console.log("生成LLM回复...");
-
- // 构建消息历史
- const messages = [
- ...voiceMessages.value
- .filter((msg) => !msg.isProcessing)
- .map((msg) => ({
- role: msg.type === "user" ? "user" : "assistant",
- content: msg.text
- })),
- { role: "user", content: userInput }
- ];
-
- let responseText = "";
-
- // 调用ChatStore的聊天方法
- await chatStore.chatWithLLM(
- {
- messages,
- model: chatStore.modelInfo?.model_id || ""
- },
- // 处理流式回复
- (content) => {
- responseText = content;
- // 更新助手消息
- const index = voiceMessages.value.findIndex(
- (msg) => msg.id === responseId
- );
- if (index !== -1) {
- voiceMessages.value[index].text = content;
- }
- }
- );
-
- // LLM生成完成,转换为语音
- console.log("LLM回复生成完成:", responseText);
- await synthesizeSpeech(responseText, responseId);
- } catch (error) {
- console.error("生成LLM回复失败:", error);
- const index = voiceMessages.value.findIndex(
- (msg) => msg.id === responseId
- );
- if (index !== -1) {
- voiceMessages.value[index].text = "抱歉,生成回复时出错";
- voiceMessages.value[index].isProcessing = false;
- }
- isProcessing.value = false;
- }
- };
-
- /**
- * 转换文本为语音
- */
- const synthesizeSpeech = async (text: string, messageId: string) => {
- try {
- console.log("转换文本为语音...");
-
- // 调用TTS生成语音
- await ttsStore.convertText(text, messageId);
-
- // 注意:TTS音频生成完成后会自动播放
- // 这部分逻辑在TTS Store的finishConversion方法中处理
-
- // 更新消息状态
- const index = voiceMessages.value.findIndex(
- (msg) => msg.id === messageId
- );
- if (index !== -1) {
- voiceMessages.value[index].audioId = messageId;
- voiceMessages.value[index].isProcessing = false;
- }
- } catch (error) {
- console.error("转换文本为语音失败:", error);
- const index = voiceMessages.value.findIndex(
- (msg) => msg.id === messageId
- );
- if (index !== -1) {
- voiceMessages.value[index].isProcessing = false;
- }
- } finally {
- isProcessing.value = false;
- }
- };
-
- /**
- * 清除所有消息
- */
- const clearMessages = () => {
- voiceMessages.value = [];
- };
-
- /**
- * 播放指定消息的语音
- */
- const playMessageAudio = async (messageId: string) => {
- const message = voiceMessages.value.find((msg) => msg.id === messageId);
- if (message && message.audioId) {
- await ttsStore.play(message.audioId);
- }
- };
-
- /**
- * 暂停当前播放的语音
- */
- const pauseAudio = () => {
- ttsStore.pauseAll();
- };
-
- // 录音相关方法 - 这里需要根据实际情况实现
- // 通常会使用MediaRecorder API
- const startRecording = async () => {
- // 实现录音开始逻辑
- // 1. 获取麦克风权限
- // 2. 创建MediaRecorder
- // 3. 监听数据可用事件,发送到WebSocket
- console.log("开始录音...");
- };
-
- const stopRecording = async () => {
- // 实现录音停止逻辑
- console.log("停止录音...");
- };
-
- // 在组件卸载时清理资源
- onUnmounted(() => {
- if (isRecording.value) {
- stopRecording();
- }
- if (recordingTimer) {
- clearInterval(recordingTimer);
- }
- });
-
- return {
- // 状态
- isListening,
- isProcessing,
- isRecording,
- voiceMessages,
-
- // 方法
- startListening,
- stopListening,
- handleASRResult,
- clearMessages,
- playMessageAudio,
- pauseAudio
- };
-});
diff --git a/web/src/utils/clipboard.ts b/web/src/utils/clipboard.ts
new file mode 100644
index 0000000..ab79876
--- /dev/null
+++ b/web/src/utils/clipboard.ts
@@ -0,0 +1,23 @@
+const legacyCopy = (text: string) => {
+ const input = document.createElement("input");
+ input.value = text;
+ document.body.appendChild(input);
+ input.select();
+ try {
+ document.execCommand("copy");
+ } catch (err) {
+ console.error(err);
+ }
+ document.body.removeChild(input);
+};
+
+export const copy = (text: string) => {
+ if (navigator.clipboard && navigator.clipboard.writeText) {
+ navigator.clipboard.writeText(text).catch((err) => {
+ console.error(err);
+ legacyCopy(text); // 如果现代API失败,使用旧方法
+ });
+ } else {
+ legacyCopy(text); // 如果没有现代API,使用旧方法
+ }
+};
diff --git a/web/src/utils/index.ts b/web/src/utils/index.ts
index af31820..5b78882 100644
--- a/web/src/utils/index.ts
+++ b/web/src/utils/index.ts
@@ -1,4 +1,5 @@
export * from "./audio";
+export * from "./clipboard";
export * from "./context";
export * from "./format";
export * from "./media";
diff --git a/web/src/views/ChatLLMView.vue b/web/src/views/ChatLLMView.vue
index 09fc809..177e3c6 100644
--- a/web/src/views/ChatLLMView.vue
+++ b/web/src/views/ChatLLMView.vue
@@ -4,6 +4,7 @@ import type { Message } from "@/interfaces";
import { throttle } from "lodash-es";
import AIAvatar from "@/assets/ai_avatar.png";
import {
+ DocumentDuplicateIcon,
ExclamationTriangleIcon,
microphone,
PaperAirplaneIcon,
@@ -12,6 +13,7 @@ import {
import UserAvatar from "@/assets/user_avatar.jpg";
import markdown from "@/components/markdown.vue";
import { useAsrStore, useChatStore, useLayoutStore } from "@/stores";
+import { copy } from "@/utils";
const chatStore = useChatStore();
const { historyMessages, completing, modelList, modelInfo, thinking } =
@@ -29,6 +31,11 @@ const collapseActive = ref(
historyMessages.value.map((msg, idx) => String(msg.id ?? idx))
);
+// 过滤出type为chat的聊天消息
+const filteredMessages = computed(() =>
+ historyMessages.value.filter((msg) => msg.type === "chat" || !msg.type)
+);
+
const getName = (msg: Message, idx: number) => String(msg.id ?? idx);
// TODO: bugfix: 未能正确展开
@@ -148,7 +155,7 @@ onMounted(() => {
@@ -199,8 +206,18 @@ onMounted(() => {
-
-
+
+
+
+
+
+
+
+
+
+
+ 复制内容
+
@@ -241,7 +258,7 @@ onMounted(() => {
:positive-button-props="{ type: 'error' }"
positive-text="清除"
negative-text="取消"
- @positive-click="chatStore.clearHistoryMessages"
+ @positive-click="chatStore.clearHistoryMessages('chat')"
@negative-click="() => {}"
>
diff --git a/web/src/views/VoiceView.vue b/web/src/views/VoiceView.vue
index 3d3ee1d..2a880e4 100644
--- a/web/src/views/VoiceView.vue
+++ b/web/src/views/VoiceView.vue
@@ -23,6 +23,11 @@ const collapseActive = ref(
historyMessages.value.map((msg, idx) => String(msg.id ?? idx))
);
+// 过滤出type为voice的聊天消息
+const filteredMessages = computed(() =>
+ historyMessages.value.filter((msg) => msg.type === "voice")
+);
+
const getName = (msg: Message, idx: number) => String(msg.id ?? idx);
// TODO: bugfix: 未能正确展开
@@ -135,7 +140,7 @@ onMounted(() => {
@@ -217,7 +222,7 @@ onMounted(() => {
:positive-button-props="{ type: 'error' }"
positive-text="清除"
negative-text="取消"
- @positive-click="chatStore.clearHistoryMessages"
+ @positive-click="chatStore.clearHistoryMessages('voice')"
@negative-click="() => {}"
>