diff --git a/backend/app/api/v1/endpoints/tts.py b/backend/app/api/v1/endpoints/tts.py
index e3462c8..6caafeb 100644
--- a/backend/app/api/v1/endpoints/tts.py
+++ b/backend/app/api/v1/endpoints/tts.py
@@ -9,7 +9,7 @@ from typing import Dict, Any, Optional as OptionalType
from app.constants.tts import APP_ID, TOKEN, SPEAKER
-# Protocol constants remain unchanged...
+# Protocol constants
PROTOCOL_VERSION = 0b0001
DEFAULT_HEADER_SIZE = 0b0001
FULL_CLIENT_REQUEST = 0b0001
@@ -35,7 +35,7 @@ EVENT_TTSSentenceEnd = 351
EVENT_TTSResponse = 352
-# All class definitions remain unchanged...
+# Class definitions
class Header:
def __init__(self,
protocol_version=PROTOCOL_VERSION,
@@ -93,7 +93,7 @@ class Response:
self.payload_json = None
-# Utility functions remain unchanged...
+# Utility functions
def gen_log_id():
"""生成logID"""
ts = int(time.time() * 1000)
@@ -191,7 +191,7 @@ async def send_event(ws, header, optional=None, payload=None):
await ws.send(full_client_request)
-# Changed: TTS state management class, adds message ID and task tracking
+# TTS state management class, adds message ID and task tracking
class TTSState:
def __init__(self, message_id: str):
self.message_id = message_id
@@ -374,8 +374,9 @@ async def process_tts_task(websocket, message_id: str, text: str):
elif res.optional.event == EVENT_TTSResponse:
audio_count += 1
print(f"发送音频数据 [{message_id}] #{audio_count},大小: {len(res.payload)}")
- # 发送音频数据,包含消息ID
+ # 发送音频数据
await websocket.send_json({
+ "id": audio_count,
"type": "tts_audio_data",
"messageId": message_id,
"audioData": res.payload.hex() # 转为hex字符串
diff --git a/backend/app/api/v1/endpoints/voice_conversation.py b/backend/app/api/v1/endpoints/voice_conversation.py
new file mode 100644
index 0000000..3501091
--- /dev/null
+++ b/backend/app/api/v1/endpoints/voice_conversation.py
@@ -0,0 +1,105 @@
+import json
+import aiohttp
+import asyncio
+from fastapi.encoders import jsonable_encoder
+from starlette.websockets import WebSocket
+
+from . import tts
+from app.constants.model_data import tip_message, base_url, headers
+
+
+async def process_voice_conversation(websocket: WebSocket, asr_text: str, message_id: str):
+ try:
+ print(f"开始处理语音对话 [{message_id}]: {asr_text}")
+
+ # 1. Send the ASR result to the frontend
+ await websocket.send_json({
+ "type": "asr_result",
+ "messageId": message_id,
+ "result": asr_text
+ })
+
+ # 2. Build the LLM request
+ messages = [
+ tip_message,
+ {"role": "user", "content": asr_text}
+ ]
+ payload = {
+ "model": "gpt-4o",
+ "messages": messages,
+ "stream": True
+ }
+
+ print(f"发送LLM请求 [{message_id}]: {json.dumps(payload, ensure_ascii=False)}")
+
+ # 3. Stream the LLM response
+ full_response = ""
+ llm_completed = False
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ base_url,
+ headers=headers,
+ json=jsonable_encoder(payload)
+ ) as resp:
+ if resp.status != 200:
+ error_text = await resp.text()
+ raise Exception(f"LLM API request failed: {resp.status} - {error_text}")
+
+ # Read the streamed response
+ async for line in resp.content:
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ data = line[6:].strip()
+ if data == '[DONE]':
+ llm_completed = True
+ print(f"LLM响应完成 [{message_id}]")
+ break
+
+ try:
+ result = json.loads(data)
+ # Extract the content
+ choices = result.get("choices", [])
+ if not choices:
+ # Skip packets with empty choices
+ continue
+
+ delta = choices[0].get("delta", {})
+ content = delta.get("content")
+
+ if content:
+ full_response += content
+
+ except json.JSONDecodeError as e:
+ print(f"JSON解析错误 [{message_id}]: {e}, 数据: {data}")
+ continue
+ except Exception as e:
+ print(f"处理数据包异常 [{message_id}]: {e}, 数据: {data}")
+ continue
+
+ # 4. Once LLM generation finishes, run TTS on the full response
+ if llm_completed and full_response:
+ print(f"LLM生成完成 [{message_id}], 总内容长度: {len(full_response)}")
+ print(f"完整内容: {full_response}")
+
+ # Send the completion message
+ await websocket.send_json({
+ "type": "llm_complete_response",
+ "messageId": message_id,
+ "content": full_response
+ })
+
+ # Start TTS on the full content
+ print(f"Starting TTS for full content [{message_id}]: {full_response}")
+ await tts.handle_tts_text(websocket, message_id, full_response)
+
+ except Exception as e:
+ print(f"语音对话处理异常 [{message_id}]: {e}")
+ import traceback
+ traceback.print_exc()
+ await websocket.send_json({
+ "type": "voice_conversation_error",
+ "messageId": message_id,
+ "message": f"处理失败: {str(e)}"
+ })
diff --git a/backend/app/api/v1/endpoints/websocket_service.py b/backend/app/api/v1/endpoints/websocket_service.py
index 3e683ed..d6d32f5 100644
--- a/backend/app/api/v1/endpoints/websocket_service.py
+++ b/backend/app/api/v1/endpoints/websocket_service.py
@@ -1,12 +1,14 @@
# websocket_service.py
+import uuid
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from typing import Set
from aip import AipSpeech
from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
import json
-# Import the modified TTS module
from . import tts
+from .voice_conversation import process_voice_conversation
router = APIRouter()
active_connections: Set[WebSocket] = set()
@@ -59,10 +61,15 @@ async def websocket_online_count(websocket: WebSocket):
elif msg_type == "asr_end":
asr_text = await asr_buffer(temp_buffer)
- await websocket.send_json({"type": "asr_result", "result": asr_text})
+ # Get messageId from data; generate a new one if it is missing
+ message_id = data.get("messageId", "voice_" + str(uuid.uuid4()))
+ if data.get("voiceConversation"):
+ await process_voice_conversation(websocket, asr_text, message_id)
+ else:
+ await websocket.send_json({"type": "asr_result", "result": asr_text})
temp_buffer = bytes()
- # Changed: TTS handling supports message IDs
+ # TTS handling
elif msg_type == "tts_text":
message_id = data.get("messageId")
text = data.get("text", "")
diff --git a/web/components.d.ts b/web/components.d.ts
index ddc6cfc..3400cc8 100644
--- a/web/components.d.ts
+++ b/web/components.d.ts
@@ -21,6 +21,7 @@ declare module 'vue' {
NPopover: typeof import('naive-ui')['NPopover']
NScrollbar: typeof import('naive-ui')['NScrollbar']
NSelect: typeof import('naive-ui')['NSelect']
+ NSpin: typeof import('naive-ui')['NSpin']
NTag: typeof import('naive-ui')['NTag']
RouterLink: typeof import('vue-router')['RouterLink']
RouterView: typeof import('vue-router')['RouterView']
diff --git a/web/eslint.config.js b/web/eslint.config.js
index be6c010..553e2a1 100644
--- a/web/eslint.config.js
+++ b/web/eslint.config.js
@@ -32,6 +32,7 @@ export default antfu({
"ts/no-unsafe-function-type": "off",
"no-console": "off",
"unused-imports/no-unused-vars": "warn",
- "ts/no-use-before-define": "off"
+ "ts/no-use-before-define": "off",
+ "vue/operator-linebreak": "off",
}
});
diff --git a/web/src/assets/Icons/index.ts b/web/src/assets/Icons/index.ts
index 409197d..765b451 100644
--- a/web/src/assets/Icons/index.ts
+++ b/web/src/assets/Icons/index.ts
@@ -1,4 +1,5 @@
export { default as ChevronLeftIcon } from "./svg/heroicons/ChevronLeftIcon.svg?component";
+export { default as DocumentDuplicateIcon } from "./svg/heroicons/DocumentDuplicateIcon.svg?component";
export { default as ExclamationTriangleIcon } from "./svg/heroicons/ExclamationTriangleIcon.svg?component";
export { default as microphone } from "./svg/heroicons/MicrophoneIcon.svg?component";
export { default as PaperAirplaneIcon } from "./svg/heroicons/PaperAirplaneIcon.svg?component";
diff --git a/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg b/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg
new file mode 100644
index 0000000..bf48f9f
--- /dev/null
+++ b/web/src/assets/Icons/svg/heroicons/DocumentDuplicateIcon.svg
@@ -0,0 +1,3 @@
+
diff --git a/web/src/components/tts.vue b/web/src/components/tts.vue
index b7000e6..1e673bd 100644
--- a/web/src/components/tts.vue
+++ b/web/src/components/tts.vue
@@ -34,13 +34,15 @@ const handleClick = () => {
ttsStore.convertText(text, messageId);
}
};
-// Clear the previous audio when the text changes
+
+// Clear the previous audio on text change
watch(
() => text,
() => {
ttsStore.clearAudio(messageId);
}
);
+
onUnmounted(() => {
ttsStore.clearAudio(messageId);
});
diff --git a/web/src/interfaces/chat_service.ts b/web/src/interfaces/chat_service.ts
index 94e2f51..2724c15 100644
--- a/web/src/interfaces/chat_service.ts
+++ b/web/src/interfaces/chat_service.ts
@@ -12,6 +12,7 @@ export interface Message {
role?: string;
usage?: UsageInfo;
id?: string;
+ type?: 'chat' | 'voice';
[property: string]: any;
}
diff --git a/web/src/layouts/BasicLayout.vue b/web/src/layouts/BasicLayout.vue
index d2c6c0d..0664ca1 100644
--- a/web/src/layouts/BasicLayout.vue
+++ b/web/src/layouts/BasicLayout.vue
@@ -45,7 +45,20 @@ const { hiddenLeftSidebar, simpleMode } = storeToRefs(layoutStore);
"
to="/"
>
- 聊天
+ 对话
+
+
+ 语音聊天
diff --git a/web/src/router/index.ts b/web/src/router/index.ts
index e4daaed..10ee4a9 100644
--- a/web/src/router/index.ts
+++ b/web/src/router/index.ts
@@ -2,7 +2,8 @@ import { createRouter, createWebHistory } from "vue-router";
import BasicLayout from "@/layouts/BasicLayout.vue";
import { resetDescription, setTitle } from "@/utils";
-import community from "@/views/CommunityView.vue";
+import ChatLLM from "@/views/ChatLLMView.vue";
+import VoiceView from "@/views/VoiceView.vue";
const router = createRouter({
history: createWebHistory(import.meta.env.BASE_URL),
@@ -13,11 +14,19 @@ const router = createRouter({
children: [
{
path: "",
- name: "community",
- component: community,
+ name: "ChatLLM",
+ component: ChatLLM,
meta: {
title: "对话"
}
+ },
+ {
+ path: "/voice",
+ name: "Voice",
+ component: VoiceView,
+ meta: {
+ title: "语音对话"
+ }
}
]
}
diff --git a/web/src/services/websocket.ts b/web/src/services/websocket.ts
index 489ec21..78fd4c8 100644
--- a/web/src/services/websocket.ts
+++ b/web/src/services/websocket.ts
@@ -6,6 +6,7 @@ export const useWebSocketStore = defineStore("websocket", () => {
const connected = ref(false);
const chatStore = useChatStore();
const ttsStore = useTtsStore();
+ const router = useRouter();
const { onlineCount } = storeToRefs(chatStore);
@@ -14,13 +15,11 @@ export const useWebSocketStore = defineStore("websocket", () => {
if (e.data instanceof ArrayBuffer) {
// Handle binary audio data (fallback; the new version should not use this)
console.log("Received binary audio data, size:", e.data.byteLength);
- console.warn("Received legacy binary data; cannot determine messageId");
// Either ignore it or handle it as a fallback
} else if (e.data instanceof Blob) {
// If it is a Blob, convert it to an ArrayBuffer (fallback)
e.data.arrayBuffer().then((buffer: ArrayBuffer) => {
console.log("Received Blob audio data, size:", buffer.byteLength);
- console.warn("Received legacy Blob data; cannot determine messageId");
});
} else if (typeof e.data === "string") {
// Handle text JSON messages
@@ -31,7 +30,14 @@ export const useWebSocketStore = defineStore("websocket", () => {
onlineCount.value = data.online_count;
break;
case "asr_result":
- chatStore.addMessageToHistory(data.result);
+ if (router.currentRoute.value.path === "/") {
+ chatStore.addMessageToHistory(data.result);
+ } else if (router.currentRoute.value.path === "/voice") {
+ // Handling for the voice page
+ chatStore.addMessageToHistory(data.result, "user", "voice");
+ } else {
+ console.warn(data);
+ }
break;
// Handling for the new TTS message format
@@ -76,7 +82,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
ttsStore.finishConversion(data.messageId);
} else {
console.log("TTS音频传输完成(无messageId)");
- // 兜底处理,可能是旧格式
ttsStore.finishConversion(data.messageId);
}
break;
@@ -85,7 +90,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
// TTS session finished
if (data.messageId) {
console.log(`TTS session finished [${data.messageId}]`);
- // Extra cleanup logic could be added here
} else {
console.log("TTS session finished");
}
@@ -98,17 +102,15 @@ export const useWebSocketStore = defineStore("websocket", () => {
ttsStore.handleError(data.message, data.messageId);
} else {
console.error("TTS错误:", data.message);
- // 兜底处理,可能是旧格式
ttsStore.handleError(data.message, data.messageId || "unknown");
}
break;
- // Keep the legacy message types as a fallback
- case "tts_audio_complete_legacy":
- case "tts_complete_legacy":
- case "tts_error_legacy":
- console.log("Received legacy-format TTS message:", data.type);
- // Can be handled or ignored
+ case "llm_complete_response":
+ // Complete LLM response
+ if (router.currentRoute.value.path === "/voice") {
+ chatStore.addMessageToHistory(data.content, "assistant", "voice");
+ }
break;
default:
diff --git a/web/src/stores/asr_store.ts b/web/src/stores/asr_store.ts
index f5b6b76..39a7fd4 100644
--- a/web/src/stores/asr_store.ts
+++ b/web/src/stores/asr_store.ts
@@ -11,8 +11,8 @@ export const useAsrStore = defineStore("asr", () => {
let mediaStreamSource: MediaStreamAudioSourceNode | null = null;
let workletNode: AudioWorkletNode | null = null;
- // Get the WebSocket store instance
const webSocketStore = useWebSocketStore();
+ const router = useRouter();
/**
* Send a message over the WebSocket
@@ -81,16 +81,19 @@ export const useAsrStore = defineStore("asr", () => {
const processorUrl = URL.createObjectURL(blob);
// Load the AudioWorklet module
await audioContext.audioWorklet.addModule(processorUrl);
- // Release the URL object (to prevent a memory leak)
+ // Release the URL object to prevent a memory leak
URL.revokeObjectURL(processorUrl);
+
// Create the audio source node
mediaStreamSource = audioContext.createMediaStreamSource(stream);
+
// Create the AudioWorkletNode
workletNode = new AudioWorkletNode(audioContext, "audio-processor", {
numberOfInputs: 1,
numberOfOutputs: 1,
channelCount: 1
});
+
// Listen for audio data from the AudioWorklet
workletNode.port.onmessage = (event) => {
if (event.data.type === "audiodata") {
@@ -116,8 +119,14 @@ export const useAsrStore = defineStore("asr", () => {
const stopRecording = () => {
if (!isRecording.value) return;
+ const messageId = `voice_${Date.now()}`;
// Notify the backend that recording has ended
- sendMessage(JSON.stringify({ type: "asr_end" }));
+ const msg: Record<string, unknown> = { type: "asr_end" };
+ if (router.currentRoute.value.path === "/voice") {
+ msg.messageId = messageId;
+ msg.voiceConversation = true;
+ }
+ sendMessage(JSON.stringify(msg));
// Stop all audio tracks
if (mediaStreamSource?.mediaStream) {
diff --git a/web/src/stores/chat_store.ts b/web/src/stores/chat_store.ts
index 8392837..51dfb3e 100644
--- a/web/src/stores/chat_store.ts
+++ b/web/src/stores/chat_store.ts
@@ -7,7 +7,9 @@ import type {
import { ChatService } from "@/services";
export const useChatStore = defineStore("chat", () => {
+ const router = useRouter();
const token = "sk-fkGVZBrAqvIxLjlF3b5f19EfBb63486c90Fa5a1fBd7076Ee";
+
// Default model
const modelInfo = ref(null);
// History messages
@@ -16,32 +18,35 @@ export const useChatStore = defineStore("chat", () => {
const completing = ref(false);
// Whether the model is currently thinking
const thinking = ref(false);
+ // Model list
+ const modelList = ref([]);
// Online user count
const onlineCount = ref(0);
+ // Generate a message ID
+ const generateMessageId = () => new Date().getTime().toString();
+
+ // Get the last message
+ const getLastMessage = () =>
+ historyMessages.value[historyMessages.value.length - 1];
+
// Chat with the LLM
const chatWithLLM = async (
request: IChatWithLLMRequest,
- onProgress: (content: string) => void, // content progress callback
- getUsageInfo: (object: UsageInfo) => void = () => {}, // usage info callback
- getThinking: (thinkingContent: string) => void = () => {} // chain-of-thought content callback
+ onProgress: (content: string) => void,
+ getUsageInfo: (object: UsageInfo) => void = () => {},
+ getThinking: (thinkingContent: string) => void = () => {}
) => {
if (completing.value) throw new Error("A response is already in progress");
- completing.value = true; // start the request
+ completing.value = true;
try {
await ChatService.ChatWithLLM(
token,
request,
- (content) => {
- onProgress(content);
- },
- (object: UsageInfo) => {
- getUsageInfo(object);
- },
- (thinkingContent: string) => {
- getThinking(thinkingContent);
- }
+ onProgress,
+ getUsageInfo,
+ getThinking
);
} catch (error) {
console.error("请求失败:", error);
@@ -51,28 +56,33 @@ export const useChatStore = defineStore("chat", () => {
};
// Add a message to the history
- const addMessageToHistory = (message: string) => {
+ const addMessageToHistory = (
+ message: string,
+ role: "user" | "assistant" = "user",
+ type: "chat" | "voice" = "chat"
+ ) => {
const content = message.trim();
if (!content) return;
historyMessages.value.push({
- role: "user",
- content
+ role,
+ content,
+ type,
+ id: generateMessageId()
});
};
// Clear history messages
- const clearHistoryMessages = () => {
- historyMessages.value = [];
+ const clearHistoryMessages = (type?: "chat" | "voice") => {
+ historyMessages.value = type
+ ? historyMessages.value.filter((msg) => msg.type !== type)
+ : [];
};
- // Ensure the last message is an assistant message; if it is not, push an empty placeholder so the later chain of thought is not lost
+ // Ensure the last message is an assistant message
const ensureAssistantMessage = () => {
- if (
- historyMessages.value.length === 0 ||
- historyMessages.value[historyMessages.value.length - 1].role !==
- "assistant"
- ) {
+ const lastMessage = getLastMessage();
+ if (!lastMessage || lastMessage.role !== "assistant") {
historyMessages.value.push({
role: "assistant",
content: ""
@@ -80,57 +90,57 @@ export const useChatStore = defineStore("chat", () => {
}
};
+ // Handle the chat response
+ const handleChatResponse = async (
+ messages: IChatWithLLMRequest["messages"]
+ ) => {
+ if (!modelInfo.value) return;
+
+ // Keep only messages whose type is "chat" (or untyped)
+ const filteredMessages = messages.filter(
+ (msg) => msg.type === "chat" || !msg.type
+ );
+
+ await chatWithLLM(
+ { messages: filteredMessages, model: modelInfo.value.model_id },
+ // Handle text content
+ (content) => {
+ ensureAssistantMessage();
+ thinking.value = false;
+ getLastMessage().content = content;
+ },
+ // Handle usage info
+ (usageInfo: UsageInfo) => {
+ const lastMessage = getLastMessage();
+ if (lastMessage?.role === "assistant") {
+ lastMessage.usage = usageInfo;
+ }
+ },
+ // Handle the chain of thought
+ (thinkingContent: string) => {
+ ensureAssistantMessage();
+ thinking.value = true;
+ getLastMessage().thinking = thinkingContent;
+ }
+ );
+
+ // Set the message ID
+ getLastMessage().id = generateMessageId();
+ };
+
watch(
historyMessages,
(newVal) => {
- // Send a request when the history messages change
- if (newVal.length > 0) {
+ if (newVal.length > 0 && router.currentRoute.value.path === "/") {
const lastMessage = newVal[newVal.length - 1];
- if (lastMessage.role === "user" && modelInfo.value) {
- chatWithLLM(
- {
- messages: newVal,
- model: modelInfo.value?.model_id
- },
- // Progress callback, text content
- (content) => {
- ensureAssistantMessage();
- thinking.value = false;
- historyMessages.value[historyMessages.value.length - 1].content =
- content;
- },
- // Usage info callback
- (usageInfo: UsageInfo) => {
- // If the last message is the assistant reply, update its usage info
- if (
- historyMessages.value.length > 0 &&
- historyMessages.value[historyMessages.value.length - 1].role ===
- "assistant"
- ) {
- historyMessages.value[historyMessages.value.length - 1].usage =
- usageInfo;
- }
- },
- // Chain-of-thought content callback
- (thinkingContent: string) => {
- ensureAssistantMessage();
- thinking.value = true;
- historyMessages.value[historyMessages.value.length - 1].thinking =
- thinkingContent;
- }
- ).then(() => {
- historyMessages.value[historyMessages.value.length - 1].id =
- new Date().getTime().toString();
- });
+ if (lastMessage.role === "user") {
+ handleChatResponse(newVal);
}
}
},
{ deep: true }
);
- // Model list
- const modelList = ref([]);
-
// Fetch the model list
const getModelList = async () => {
try {
@@ -144,14 +154,14 @@ export const useChatStore = defineStore("chat", () => {
return {
token,
completing,
- chatWithLLM,
+ thinking,
+ modelInfo,
+ modelList,
historyMessages,
+ chatWithLLM,
addMessageToHistory,
clearHistoryMessages,
getModelList,
- modelList,
- modelInfo,
- onlineCount,
- thinking
+ onlineCount
};
});
diff --git a/web/src/stores/index.ts b/web/src/stores/index.ts
index 9647e3d..ef06519 100644
--- a/web/src/stores/index.ts
+++ b/web/src/stores/index.ts
@@ -1,4 +1,4 @@
export * from "./asr_store";
export * from "./chat_store";
export * from "./layout_store";
-export * from "./tts_store";
\ No newline at end of file
+export * from "./tts_store";
diff --git a/web/src/utils/clipboard.ts b/web/src/utils/clipboard.ts
new file mode 100644
index 0000000..ab79876
--- /dev/null
+++ b/web/src/utils/clipboard.ts
@@ -0,0 +1,23 @@
+const legacyCopy = (text: string) => {
+ const input = document.createElement("input");
+ input.value = text;
+ document.body.appendChild(input);
+ input.select();
+ try {
+ document.execCommand("copy");
+ } catch (err) {
+ console.error(err);
+ }
+ document.body.removeChild(input);
+};
+
+export const copy = (text: string) => {
+ if (navigator.clipboard && navigator.clipboard.writeText) {
+ navigator.clipboard.writeText(text).catch((err) => {
+ console.error(err);
+ legacyCopy(text); // fall back to the legacy method if the modern API fails
+ });
+ } else {
+ legacyCopy(text); // use the legacy method when the modern API is unavailable
+ }
+};
diff --git a/web/src/utils/index.ts b/web/src/utils/index.ts
index af31820..5b78882 100644
--- a/web/src/utils/index.ts
+++ b/web/src/utils/index.ts
@@ -1,4 +1,5 @@
export * from "./audio";
+export * from "./clipboard";
export * from "./context";
export * from "./format";
export * from "./media";
diff --git a/web/src/views/CommunityView.vue b/web/src/views/ChatLLMView.vue
similarity index 90%
rename from web/src/views/CommunityView.vue
rename to web/src/views/ChatLLMView.vue
index 09fc809..177e3c6 100644
--- a/web/src/views/CommunityView.vue
+++ b/web/src/views/ChatLLMView.vue
@@ -4,6 +4,7 @@ import type { Message } from "@/interfaces";
import { throttle } from "lodash-es";
import AIAvatar from "@/assets/ai_avatar.png";
import {
+ DocumentDuplicateIcon,
ExclamationTriangleIcon,
microphone,
PaperAirplaneIcon,
@@ -12,6 +13,7 @@ import {
import UserAvatar from "@/assets/user_avatar.jpg";
import markdown from "@/components/markdown.vue";
import { useAsrStore, useChatStore, useLayoutStore } from "@/stores";
+import { copy } from "@/utils";
const chatStore = useChatStore();
const { historyMessages, completing, modelList, modelInfo, thinking } =
@@ -29,6 +31,11 @@ const collapseActive = ref(
historyMessages.value.map((msg, idx) => String(msg.id ?? idx))
);
+// Filter to chat-type messages only
+const filteredMessages = computed(() =>
+ historyMessages.value.filter((msg) => msg.type === "chat" || !msg.type)
+);
+
const getName = (msg: Message, idx: number) => String(msg.id ?? idx);
// TODO: bugfix: does not expand correctly
@@ -148,7 +155,7 @@ onMounted(() => {
@@ -199,8 +206,18 @@ onMounted(() => {
-
-
+
+
+
+
+
+
+
+
+
+
+ 复制内容
+
@@ -241,7 +258,7 @@ onMounted(() => {
:positive-button-props="{ type: 'error' }"
positive-text="清除"
negative-text="取消"
- @positive-click="chatStore.clearHistoryMessages"
+ @positive-click="chatStore.clearHistoryMessages('chat')"
@negative-click="() => {}"
>
diff --git a/web/src/views/VoiceView.vue b/web/src/views/VoiceView.vue
new file mode 100644
index 0000000..2a880e4
--- /dev/null
+++ b/web/src/views/VoiceView.vue
@@ -0,0 +1,259 @@
+
+
+
+
+
+
+
+
+
+
+
+ 助手:
+ 你好,我是你的智能助手,请问有什么可以帮助你的吗?
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
{{
+ msg.role === "user" ? "你:" : "助手:"
+ }}
+
+
+ Tokens: {{ msg.usage?.total_tokens }}
+
+
+
+ handleItemHeaderClick(getName(msg, idx))
+ "
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 正在语音输入...
+
+
+
+
+
+
+
+
+ {}"
+ >
+
+
+
+
+
+ 清除历史
+
+
+
+ 确定要清除历史消息吗?
+
+
+
+
+ {{ isRecording ? "停止输入" : "语音输入" }}
+
+
+
+
+
+
+