feat: complete most of the feature development
@@ -374,8 +374,9 @@ async def process_tts_task(websocket, message_id: str, text: str):
        elif res.optional.event == EVENT_TTSResponse:
            audio_count += 1
            print(f"Sending audio data [{message_id}] #{audio_count}, size: {len(res.payload)}")
            # Send audio data, including the message ID
            # Send audio data
            await websocket.send_json({
                "id": audio_count,
                "type": "tts_audio_data",
                "messageId": message_id,
                "audioData": res.payload.hex()  # convert to a hex string
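For orientation (not part of the commit): the client now receives this chunk as JSON rather than raw binary, so the hex string has to be decoded back to bytes before playback. A minimal TypeScript sketch, assuming the message shape shown above; the WebSocket URL and the playPcmChunk() sink are assumptions:

// Sketch only: decode the hex string produced by res.payload.hex() above.
declare function playPcmChunk(chunk: Uint8Array, messageId: string): void; // hypothetical audio sink

function hexToBytes(hex: string): Uint8Array {
  const bytes = new Uint8Array(hex.length / 2);
  for (let i = 0; i < bytes.length; i++) {
    bytes[i] = Number.parseInt(hex.slice(i * 2, i * 2 + 2), 16);
  }
  return bytes;
}

const ws = new WebSocket("ws://localhost:8000/ws"); // hypothetical endpoint
ws.onmessage = (e) => {
  if (typeof e.data !== "string") return;
  const data = JSON.parse(e.data);
  if (data.type === "tts_audio_data") {
    playPcmChunk(hexToBytes(data.audioData), data.messageId);
  }
};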
105  backend/app/api/v1/endpoints/voice_conversation.py  Normal file
@@ -0,0 +1,105 @@
import json
import aiohttp
import asyncio
from fastapi.encoders import jsonable_encoder
from starlette.websockets import WebSocket

from . import tts
from app.constants.model_data import tip_message, base_url, headers


async def process_voice_conversation(websocket: WebSocket, asr_text: str, message_id: str):
    try:
        print(f"Start processing voice conversation [{message_id}]: {asr_text}")

        # 1. Send the ASR result to the frontend
        await websocket.send_json({
            "type": "asr_result",
            "messageId": message_id,
            "result": asr_text
        })

        # 2. Build the LLM request
        messages = [
            tip_message,
            {"role": "user", "content": asr_text}
        ]
        payload = {
            "model": "gpt-4o",
            "messages": messages,
            "stream": True
        }

        print(f"Sending LLM request [{message_id}]: {json.dumps(payload, ensure_ascii=False)}")

        # 3. Stream the LLM response
        full_response = ""
        llm_completed = False

        async with aiohttp.ClientSession() as session:
            async with session.post(
                base_url,
                headers=headers,
                json=jsonable_encoder(payload)
            ) as resp:
                if resp.status != 200:
                    error_text = await resp.text()
                    raise Exception(f"LLM API request failed: {resp.status} - {error_text}")

                # Read the streaming response
                async for line in resp.content:
                    if line:
                        line = line.decode('utf-8').strip()
                        if line.startswith('data: '):
                            data = line[6:].strip()
                            if data == '[DONE]':
                                llm_completed = True
                                print(f"LLM response complete [{message_id}]")
                                break

                            try:
                                result = json.loads(data)
                                # Extract the content delta
                                choices = result.get("choices", [])
                                if not choices:
                                    # Skip packets with empty choices
                                    continue

                                delta = choices[0].get("delta", {})
                                content = delta.get("content")

                                if content:
                                    full_response += content

                            except json.JSONDecodeError as e:
                                print(f"JSON decode error [{message_id}]: {e}, data: {data}")
                                continue
                            except Exception as e:
                                print(f"Error handling packet [{message_id}]: {e}, data: {data}")
                                continue

        # 4. After the LLM finishes, run the full TTS pass
        if llm_completed and full_response:
            print(f"LLM generation complete [{message_id}], total content length: {len(full_response)}")
            print(f"Full content: {full_response}")

            # Send the completion message
            await websocket.send_json({
                "type": "llm_complete_response",
                "messageId": message_id,
                "content": full_response
            })

            # Start TTS on the full content
            print(f"Starting full TTS processing [{message_id}]: {full_response}")
            await tts.handle_tts_text(websocket, message_id, full_response)

    except Exception as e:
        print(f"Voice conversation processing error [{message_id}]: {e}")
        import traceback
        traceback.print_exc()
        await websocket.send_json({
            "type": "voice_conversation_error",
            "messageId": message_id,
            "message": f"Processing failed: {str(e)}"
        })
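As a reading aid for the new endpoint above (not part of the commit), a hedged sketch of the server-to-client messages the voice flow emits, written as a TypeScript union; the field names come from the send_json() calls above, everything else is an assumption:

// Sketch only: messages produced by process_voice_conversation() and the TTS path.
type ServerMessage =
  | { type: "asr_result"; messageId?: string; result: string }
  | { type: "llm_complete_response"; messageId: string; content: string }
  | { type: "tts_audio_data"; id: number; messageId: string; audioData: string }
  | { type: "voice_conversation_error"; messageId: string; message: string };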
@@ -1,4 +1,6 @@
# websocket_service.py
import uuid

from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from typing import Set
from aip import AipSpeech
@@ -6,6 +8,7 @@ from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
import json

from . import tts
from .voice_conversation import process_voice_conversation

router = APIRouter()
active_connections: Set[WebSocket] = set()
@@ -58,7 +61,12 @@ async def websocket_online_count(websocket: WebSocket):

            elif msg_type == "asr_end":
                asr_text = await asr_buffer(temp_buffer)
                await websocket.send_json({"type": "asr_result", "result": asr_text})
                # Take the messageId from data; generate a new ID if it is missing
                message_id = data.get("messageId", "voice_" + str(uuid.uuid4()))
                if data.get("voiceConversation"):
                    await process_voice_conversation(websocket, asr_text, message_id)
                else:
                    await websocket.send_json({"type": "asr_result", "result": asr_text})
                temp_buffer = bytes()

            # TTS handling
1  web/components.d.ts  vendored
@@ -21,6 +21,7 @@ declare module 'vue' {
    NPopover: typeof import('naive-ui')['NPopover']
    NScrollbar: typeof import('naive-ui')['NScrollbar']
    NSelect: typeof import('naive-ui')['NSelect']
    NSpin: typeof import('naive-ui')['NSpin']
    NTag: typeof import('naive-ui')['NTag']
    RouterLink: typeof import('vue-router')['RouterLink']
    RouterView: typeof import('vue-router')['RouterView']
@@ -32,6 +32,7 @@ export default antfu({
    "ts/no-unsafe-function-type": "off",
    "no-console": "off",
    "unused-imports/no-unused-vars": "warn",
    "ts/no-use-before-define": "off"
    "ts/no-use-before-define": "off",
    "vue/operator-linebreak": "off",
  }
});
@@ -1,4 +1,5 @@
export { default as ChevronLeftIcon } from "./svg/heroicons/ChevronLeftIcon.svg?component";
export { default as DocumentDuplicateIcon } from "./svg/heroicons/DocumentDuplicateIcon.svg?component";
export { default as ExclamationTriangleIcon } from "./svg/heroicons/ExclamationTriangleIcon.svg?component";
export { default as microphone } from "./svg/heroicons/MicrophoneIcon.svg?component";
export { default as PaperAirplaneIcon } from "./svg/heroicons/PaperAirplaneIcon.svg?component";
@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="size-6">
<path stroke-linecap="round" stroke-linejoin="round" d="M15.75 17.25v3.375c0 .621-.504 1.125-1.125 1.125h-9.75a1.125 1.125 0 0 1-1.125-1.125V7.875c0-.621.504-1.125 1.125-1.125H6.75a9.06 9.06 0 0 1 1.5.124m7.5 10.376h3.375c.621 0 1.125-.504 1.125-1.125V11.25c0-4.46-3.243-8.161-7.5-8.876a9.06 9.06 0 0 0-1.5-.124H9.375c-.621 0-1.125.504-1.125 1.125v3.5m7.5 10.375H9.375a1.125 1.125 0 0 1-1.125-1.125v-9.25m12 6.625v-1.875a3.375 3.375 0 0 0-3.375-3.375h-1.5a1.125 1.125 0 0 1-1.125-1.125v-1.5a3.375 3.375 0 0 0-3.375-3.375H9.75" />
</svg>
@@ -12,6 +12,7 @@ export interface Message {
  role?: string;
  usage?: UsageInfo;
  id?: string;
  type?: 'chat' | 'voice';
  [property: string]: any;
}
@@ -1,4 +1,4 @@
import { useChatStore, useTtsStore,useVoiceStore } from "@/stores";
import { useChatStore, useTtsStore } from "@/stores";

// WebSocket
export const useWebSocketStore = defineStore("websocket", () => {
@@ -6,7 +6,6 @@ export const useWebSocketStore = defineStore("websocket", () => {
  const connected = ref(false);
  const chatStore = useChatStore();
  const ttsStore = useTtsStore();
  const voiceStore = useVoiceStore();
  const router = useRouter();

  const { onlineCount } = storeToRefs(chatStore);
@@ -16,13 +15,11 @@ export const useWebSocketStore = defineStore("websocket", () => {
      if (e.data instanceof ArrayBuffer) {
        // Handle binary audio data (fallback; the new version should not use this path)
        console.log("Received binary audio data, size:", e.data.byteLength);
        console.warn("Received legacy binary data; cannot determine the messageId");
        // Can be ignored or kept only as a fallback
      } else if (e.data instanceof Blob) {
        // If it is a Blob, convert it to an ArrayBuffer (fallback)
        e.data.arrayBuffer().then((buffer: ArrayBuffer) => {
          console.log("Received Blob audio data, size:", buffer.byteLength);
          console.warn("Received legacy Blob data; cannot determine the messageId");
        });
      } else if (typeof e.data === "string") {
        // Handle text JSON messages
@@ -36,8 +33,8 @@ export const useWebSocketStore = defineStore("websocket", () => {
          if (router.currentRoute.value.path === "/") {
            chatStore.addMessageToHistory(data.result);
          } else if (router.currentRoute.value.path === "/voice") {
            // On the voice page, let the VoiceStore handle it
            voiceStore.handleASRResult(data.result);
            // Handling on the voice page
            chatStore.addMessageToHistory(data.result, "user", "voice");
          } else {
            console.warn(data);
          }
@@ -109,6 +106,13 @@ export const useWebSocketStore = defineStore("websocket", () => {
          }
          break;

        case "llm_complete_response":
          // Full LLM response
          if (router.currentRoute.value.path === "/voice") {
            chatStore.addMessageToHistory(data.content, "assistant", "voice");
          }
          break;

        default:
          console.log("Unknown message type:", data.type, data);
      }
@@ -11,8 +11,8 @@ export const useAsrStore = defineStore("asr", () => {
  let mediaStreamSource: MediaStreamAudioSourceNode | null = null;
  let workletNode: AudioWorkletNode | null = null;

  // Get the WebSocket store instance
  const webSocketStore = useWebSocketStore();
  const router = useRouter();

  /**
   * Send a message over the WebSocket
@@ -81,16 +81,19 @@ export const useAsrStore = defineStore("asr", () => {
    const processorUrl = URL.createObjectURL(blob);
    // Load the AudioWorklet module
    await audioContext.audioWorklet.addModule(processorUrl);
    // Release the URL object (to prevent a memory leak)
    // Release the URL object to prevent a memory leak
    URL.revokeObjectURL(processorUrl);

    // Create the audio source node
    mediaStreamSource = audioContext.createMediaStreamSource(stream);

    // Create the AudioWorkletNode
    workletNode = new AudioWorkletNode(audioContext, "audio-processor", {
      numberOfInputs: 1,
      numberOfOutputs: 1,
      channelCount: 1
    });

    // Listen for audio data coming from the AudioWorklet
    workletNode.port.onmessage = (event) => {
      if (event.data.type === "audiodata") {
@@ -116,8 +119,14 @@ export const useAsrStore = defineStore("asr", () => {
  const stopRecording = () => {
    if (!isRecording.value) return;

    const messageId = `voice_${Date.now()}`;
    // Notify the backend that recording has ended
    sendMessage(JSON.stringify({ type: "asr_end" }));
    const msg: Record<string, any> = { type: "asr_end" };
    if (router.currentRoute.value.path === "/voice") {
      msg.messageId = messageId;
      msg.voiceConversation = true;
    }
    sendMessage(JSON.stringify(msg));

    // Stop all audio tracks
    if (mediaStreamSource?.mediaStream) {
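For reference (not part of the commit), the control message that stopRecording() now sends and that the backend asr_end branch reads; a TypeScript sketch, with the optional fields only set on the /voice page:

// Sketch only: client -> server message built in stopRecording() above.
interface AsrEndMessage {
  type: "asr_end";
  messageId?: string; // e.g. `voice_${Date.now()}`, only on the /voice page
  voiceConversation?: boolean; // true switches the backend to the full voice pipeline
}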
@@ -7,7 +7,9 @@ import type {
import { ChatService } from "@/services";

export const useChatStore = defineStore("chat", () => {
  const router = useRouter();
  const token = "sk-fkGVZBrAqvIxLjlF3b5f19EfBb63486c90Fa5a1fBd7076Ee";

  // Default model
  const modelInfo = ref<ModelInfo | null>(null);
  // Message history
@@ -16,32 +18,35 @@ export const useChatStore = defineStore("chat", () => {
  const completing = ref<boolean>(false);
  // Whether the model is thinking
  const thinking = ref<boolean>(false);
  // Model list
  const modelList = ref<ModelListInfo[]>([]);
  // Online user count
  const onlineCount = ref<number>(0);

  // Helper to generate a message ID
  const generateMessageId = () => new Date().getTime().toString();

  // Get the last message
  const getLastMessage = () =>
    historyMessages.value[historyMessages.value.length - 1];

  // Chat with the LLM
  const chatWithLLM = async (
    request: IChatWithLLMRequest,
    onProgress: (content: string) => void, // content progress callback
    getUsageInfo: (object: UsageInfo) => void = () => {}, // usage info callback
    getThinking: (thinkingContent: string) => void = () => {} // chain-of-thought callback
    onProgress: (content: string) => void,
    getUsageInfo: (object: UsageInfo) => void = () => {},
    getThinking: (thinkingContent: string) => void = () => {}
  ) => {
    if (completing.value) throw new Error("A response is already in progress");

    completing.value = true; // start the request
    completing.value = true;
    try {
      await ChatService.ChatWithLLM(
        token,
        request,
        (content) => {
          onProgress(content);
        },
        (object: UsageInfo) => {
          getUsageInfo(object);
        },
        (thinkingContent: string) => {
          getThinking(thinkingContent);
        }
        onProgress,
        getUsageInfo,
        getThinking
      );
    } catch (error) {
      console.error("Request failed:", error);
@@ -51,28 +56,33 @@ export const useChatStore = defineStore("chat", () => {
  };

  // Add a message to the history
  const addMessageToHistory = (message: string) => {
  const addMessageToHistory = (
    message: string,
    role: "user" | "assistant" = "user",
    type: "chat" | "voice" = "chat"
  ) => {
    const content = message.trim();
    if (!content) return;

    historyMessages.value.push({
      role: "user",
      content
      role,
      content,
      type,
      id: generateMessageId()
    });
  };

  // Clear the message history
  const clearHistoryMessages = () => {
    historyMessages.value = [];
  const clearHistoryMessages = (type?: "chat" | "voice") => {
    historyMessages.value = type
      ? historyMessages.value.filter((msg) => msg.type !== type)
      : [];
  };

  // Make sure the last message is an assistant message; if it is not, push an empty placeholder, otherwise the chain of thought that follows would be lost
  // Make sure the last message is an assistant message
  const ensureAssistantMessage = () => {
    if (
      historyMessages.value.length === 0 ||
      historyMessages.value[historyMessages.value.length - 1].role !==
        "assistant"
    ) {
    const lastMessage = getLastMessage();
    if (!lastMessage || lastMessage.role !== "assistant") {
      historyMessages.value.push({
        role: "assistant",
        content: ""
@@ -80,57 +90,57 @@ export const useChatStore = defineStore("chat", () => {
    }
  };

  // Logic for handling a chat response
  const handleChatResponse = async (
    messages: IChatWithLLMRequest["messages"]
  ) => {
    if (!modelInfo.value) return;

    // Keep only the messages whose type is chat
    const filteredMessages = computed(() =>
      messages.filter((msg) => msg.type === "chat" || !msg.type)
    );

    await chatWithLLM(
      { messages: filteredMessages.value, model: modelInfo.value.model_id },
      // Handle the text content
      (content) => {
        ensureAssistantMessage();
        thinking.value = false;
        getLastMessage().content = content;
      },
      // Handle the usage info
      (usageInfo: UsageInfo) => {
        const lastMessage = getLastMessage();
        if (lastMessage?.role === "assistant") {
          lastMessage.usage = usageInfo;
        }
      },
      // Handle the chain of thought
      (thinkingContent: string) => {
        ensureAssistantMessage();
        thinking.value = true;
        getLastMessage().thinking = thinkingContent;
      }
    );

    // Set the message ID
    getLastMessage().id = generateMessageId();
  };

  watch(
    historyMessages,
    (newVal) => {
      // When the message history changes, send a request
      if (newVal.length > 0) {
      if (newVal.length > 0 && router.currentRoute.value.path === "/") {
        const lastMessage = newVal[newVal.length - 1];
        if (lastMessage.role === "user" && modelInfo.value) {
          chatWithLLM(
            {
              messages: newVal,
              model: modelInfo.value?.model_id
            },
            // Progress callback for the text content
            (content) => {
              ensureAssistantMessage();
              thinking.value = false;
              historyMessages.value[historyMessages.value.length - 1].content =
                content;
            },
            // Usage info callback
            (usageInfo: UsageInfo) => {
              // If the last message is the assistant's reply, update its usage info
              if (
                historyMessages.value.length > 0 &&
                historyMessages.value[historyMessages.value.length - 1].role ===
                  "assistant"
              ) {
                historyMessages.value[historyMessages.value.length - 1].usage =
                  usageInfo;
              }
            },
            // Chain-of-thought content callback
            (thinkingContent: string) => {
              ensureAssistantMessage();
              thinking.value = true;
              historyMessages.value[historyMessages.value.length - 1].thinking =
                thinkingContent;
            }
          ).then(() => {
            historyMessages.value[historyMessages.value.length - 1].id =
              new Date().getTime().toString();
          });
        if (lastMessage.role === "user") {
          handleChatResponse(newVal);
        }
      }
    },
    { deep: true }
  );

  // Model list
  const modelList = ref<ModelListInfo[]>([]);

  // Fetch the model list
  const getModelList = async () => {
    try {
@@ -144,14 +154,14 @@ export const useChatStore = defineStore("chat", () => {
  return {
    token,
    completing,
    chatWithLLM,
    thinking,
    modelInfo,
    modelList,
    historyMessages,
    chatWithLLM,
    addMessageToHistory,
    clearHistoryMessages,
    getModelList,
    modelList,
    modelInfo,
    onlineCount,
    thinking
    onlineCount
  };
});
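A short usage sketch of the reworked store API above (assumed call sites; not part of the commit):

// Sketch only: voice-page messages are tagged so the two views stay separate.
import { useChatStore } from "@/stores";

const chatStore = useChatStore();
chatStore.addMessageToHistory("hello", "user", "voice"); // stored with type "voice" and a generated id
chatStore.clearHistoryMessages("voice"); // removes only the voice messages
chatStore.clearHistoryMessages(); // clears everything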
@@ -2,4 +2,3 @@ export * from "./asr_store";
export * from "./chat_store";
export * from "./layout_store";
export * from "./tts_store";
export * from "./voice_store";
@@ -1,293 +0,0 @@
import { useWebSocketStore } from "@/services";
import { useChatStore, useTtsStore } from "@/stores";

export const useVoiceStore = defineStore("voice", () => {
  // State
  const isListening = ref(false); // whether we are listening for voice input
  const isProcessing = ref(false); // whether processing is in progress (the whole ASR, LLM and TTS pipeline)
  const currentSessionId = ref<string | null>(null); // current session ID

  // Stores this one depends on
  const chatStore = useChatStore();
  const ttsStore = useTtsStore();
  const wsStore = useWebSocketStore();

  // Voice message history
  const voiceMessages = ref<
    {
      id: string;
      type: "user" | "assistant";
      text: string;
      audioId?: string;
      timestamp: number;
      isProcessing?: boolean;
    }[]
  >([]);

  // ASR buffer state
  const isRecording = ref(false);
  const recordingStartTime = ref<number | null>(null);
  const recordingMaxDuration = 60 * 1000; // maximum recording duration: 60 seconds

  /**
   * Start voice input
   */
  const startListening = async () => {
    if (isListening.value) return;

    try {
      await wsStore.connect();

      // Create a new session ID
      currentSessionId.value = new Date().getTime().toString();
      isListening.value = true;
      isRecording.value = true;
      recordingStartTime.value = Date.now();

      // Start recording - assumes we have a startRecording method
      // This would normally use the browser MediaRecorder API
      await startRecording();

      console.log("Voice input started");
    } catch (error) {
      console.error("Failed to start voice input:", error);
      stopListening();
    }
  };

  /**
   * Stop voice input
   */
  const stopListening = async () => {
    if (!isListening.value) return;

    try {
      // Stop recording
      if (isRecording.value) {
        await stopRecording();
        isRecording.value = false;
      }

      isListening.value = false;
      recordingStartTime.value = null;

      // Send the end signal
      wsStore.send(JSON.stringify({ type: "asr_end" }));
      console.log("Voice input stopped, waiting for the ASR result");
    } catch (error) {
      console.error("Failed to stop voice input:", error);
    }
  };

  /**
   * Check the recording duration
   */
  const checkRecordingTime = () => {
    if (isRecording.value && recordingStartTime.value) {
      const currentTime = Date.now();
      const duration = currentTime - recordingStartTime.value;

      if (duration >= recordingMaxDuration) {
        console.log("Maximum recording duration reached, stopping automatically");
        stopListening();
      }
    }
  };

  // Periodically check the recording duration
  let recordingTimer: any = null;
  watch(isRecording, (newVal) => {
    if (newVal) {
      recordingTimer = setInterval(checkRecordingTime, 1000);
    } else if (recordingTimer) {
      clearInterval(recordingTimer);
      recordingTimer = null;
    }
  });

  /**
   * Handle an ASR result
   */
  const handleASRResult = async (text: string) => {
    if (!text.trim()) return;

    console.log("Received ASR result:", text);
    isProcessing.value = true;

    // Add the user message
    const userMessageId = new Date().getTime().toString();
    voiceMessages.value.push({
      id: userMessageId,
      type: "user",
      text,
      timestamp: Date.now()
    });

    // Add an assistant message placeholder
    const assistantMessageId = new Date().getTime().toString();
    voiceMessages.value.push({
      id: assistantMessageId,
      type: "assistant",
      text: "",
      timestamp: Date.now(),
      isProcessing: true
    });

    // Ask the LLM to generate a reply
    await generateLLMResponse(text, assistantMessageId);
  };

  /**
   * Generate the LLM reply
   */
  const generateLLMResponse = async (userInput: string, responseId: string) => {
    try {
      console.log("Generating LLM reply...");

      // Build the message history
      const messages = [
        ...voiceMessages.value
          .filter((msg) => !msg.isProcessing)
          .map((msg) => ({
            role: msg.type === "user" ? "user" : "assistant",
            content: msg.text
          })),
        { role: "user", content: userInput }
      ];

      let responseText = "";

      // Call the ChatStore chat method
      await chatStore.chatWithLLM(
        {
          messages,
          model: chatStore.modelInfo?.model_id || ""
        },
        // Handle the streamed reply
        (content) => {
          responseText = content;
          // Update the assistant message
          const index = voiceMessages.value.findIndex(
            (msg) => msg.id === responseId
          );
          if (index !== -1) {
            voiceMessages.value[index].text = content;
          }
        }
      );

      // LLM generation finished, convert it to speech
      console.log("LLM reply generated:", responseText);
      await synthesizeSpeech(responseText, responseId);
    } catch (error) {
      console.error("Failed to generate the LLM reply:", error);
      const index = voiceMessages.value.findIndex(
        (msg) => msg.id === responseId
      );
      if (index !== -1) {
        voiceMessages.value[index].text = "Sorry, something went wrong while generating the reply";
        voiceMessages.value[index].isProcessing = false;
      }
      isProcessing.value = false;
    }
  };

  /**
   * Convert text to speech
   */
  const synthesizeSpeech = async (text: string, messageId: string) => {
    try {
      console.log("Converting text to speech...");

      // Generate speech with TTS
      await ttsStore.convertText(text, messageId);

      // Note: the TTS audio plays automatically once generation finishes
      // That logic lives in the TTS store's finishConversion method

      // Update the message state
      const index = voiceMessages.value.findIndex(
        (msg) => msg.id === messageId
      );
      if (index !== -1) {
        voiceMessages.value[index].audioId = messageId;
        voiceMessages.value[index].isProcessing = false;
      }
    } catch (error) {
      console.error("Failed to convert text to speech:", error);
      const index = voiceMessages.value.findIndex(
        (msg) => msg.id === messageId
      );
      if (index !== -1) {
        voiceMessages.value[index].isProcessing = false;
      }
    } finally {
      isProcessing.value = false;
    }
  };

  /**
   * Clear all messages
   */
  const clearMessages = () => {
    voiceMessages.value = [];
  };

  /**
   * Play the audio of a specific message
   */
  const playMessageAudio = async (messageId: string) => {
    const message = voiceMessages.value.find((msg) => msg.id === messageId);
    if (message && message.audioId) {
      await ttsStore.play(message.audioId);
    }
  };

  /**
   * Pause the audio that is currently playing
   */
  const pauseAudio = () => {
    ttsStore.pauseAll();
  };

  // Recording helpers - to be implemented for the real environment
  // They would normally use the MediaRecorder API
  const startRecording = async () => {
    // Implement the recording start logic
    // 1. Request microphone permission
    // 2. Create a MediaRecorder
    // 3. Listen for the data-available event and send the data over the WebSocket
    console.log("Recording started...");
  };

  const stopRecording = async () => {
    // Implement the recording stop logic
    console.log("Recording stopped...");
  };

  // Clean up resources when the component unmounts
  onUnmounted(() => {
    if (isRecording.value) {
      stopRecording();
    }
    if (recordingTimer) {
      clearInterval(recordingTimer);
    }
  });

  return {
    // State
    isListening,
    isProcessing,
    isRecording,
    voiceMessages,

    // Methods
    startListening,
    stopListening,
    handleASRResult,
    clearMessages,
    playMessageAudio,
    pauseAudio
  };
});
23  web/src/utils/clipboard.ts  Normal file
@@ -0,0 +1,23 @@
const legacyCopy = (text: string) => {
  const input = document.createElement("input");
  input.value = text;
  document.body.appendChild(input);
  input.select();
  try {
    document.execCommand("copy");
  } catch (err) {
    console.error(err);
  }
  document.body.removeChild(input);
};

export const copy = (text: string) => {
  if (navigator.clipboard && navigator.clipboard.writeText) {
    navigator.clipboard.writeText(text).catch((err) => {
      console.error(err);
      legacyCopy(text); // fall back to the legacy method if the modern API fails
    });
  } else {
    legacyCopy(text); // fall back to the legacy method if the modern API is unavailable
  }
};
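A brief usage sketch (assumed call site, mirroring the copy button added to the chat view below; not part of the commit):

// Sketch only: copy a message body from a click handler.
import { copy } from "@/utils";

const onCopyClick = (content?: string) => {
  copy(content || ""); // falls back to the hidden-input + execCommand path on older browsers
};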
@@ -1,4 +1,5 @@
export * from "./audio";
export * from "./clipboard";
export * from "./context";
export * from "./format";
export * from "./media";
@@ -4,6 +4,7 @@ import type { Message } from "@/interfaces";
import { throttle } from "lodash-es";
import AIAvatar from "@/assets/ai_avatar.png";
import {
  DocumentDuplicateIcon,
  ExclamationTriangleIcon,
  microphone,
  PaperAirplaneIcon,
@@ -12,6 +13,7 @@ import {
import UserAvatar from "@/assets/user_avatar.jpg";
import markdown from "@/components/markdown.vue";
import { useAsrStore, useChatStore, useLayoutStore } from "@/stores";
import { copy } from "@/utils";

const chatStore = useChatStore();
const { historyMessages, completing, modelList, modelInfo, thinking } =
@@ -29,6 +31,11 @@ const collapseActive = ref<string[]>(
  historyMessages.value.map((msg, idx) => String(msg.id ?? idx))
);

// Keep only the messages whose type is chat
const filteredMessages = computed(() =>
  historyMessages.value.filter((msg) => msg.type === "chat" || !msg.type)
);

const getName = (msg: Message, idx: number) => String(msg.id ?? idx);

// TODO: bugfix: does not expand correctly
@@ -148,7 +155,7 @@ onMounted(() => {
      </div>
      <!-- default message above, history messages below -->
      <div
        v-for="(msg, idx) in historyMessages"
        v-for="(msg, idx) in filteredMessages"
        :key="idx"
        class="flex items-start mb-4"
      >
@@ -199,8 +206,18 @@ onMounted(() => {
          </NCollapse>
          <!-- content below, chain of thought above -->
          <markdown :content="msg.content || ''" />
          <div v-if="msg.role !== 'user'" class="mt-2">
            <tts :text="msg.content || ''" :message-id="msg.id!" />
          <div class="flex items-center gap-2 justify-end mt-2">
            <div v-if="msg.role !== 'user'">
              <tts :text="msg.content || ''" :message-id="msg.id!" />
            </div>
            <NPopover trigger="hover">
              <template #trigger>
                <NButton quaternary circle @click="copy(msg.content || '')">
                  <DocumentDuplicateIcon class="!w-4 !h-4" />
                </NButton>
              </template>
              <span>Copy content</span>
            </NPopover>
          </div>
          <NDivider />
        </div>
@@ -241,7 +258,7 @@ onMounted(() => {
        :positive-button-props="{ type: 'error' }"
        positive-text="Clear"
        negative-text="Cancel"
        @positive-click="chatStore.clearHistoryMessages"
        @positive-click="chatStore.clearHistoryMessages('chat')"
        @negative-click="() => {}"
      >
        <template #icon>
@@ -23,6 +23,11 @@ const collapseActive = ref<string[]>(
  historyMessages.value.map((msg, idx) => String(msg.id ?? idx))
);

// Keep only the messages whose type is voice
const filteredMessages = computed(() =>
  historyMessages.value.filter((msg) => msg.type === "voice")
);

const getName = (msg: Message, idx: number) => String(msg.id ?? idx);

// TODO: bugfix: does not expand correctly
@@ -135,7 +140,7 @@ onMounted(() => {
      </div>
      <!-- default message above, history messages below -->
      <div
        v-for="(msg, idx) in historyMessages"
        v-for="(msg, idx) in filteredMessages"
        :key="idx"
        class="flex items-start mb-4"
      >
@@ -217,7 +222,7 @@ onMounted(() => {
        :positive-button-props="{ type: 'error' }"
        positive-text="Clear"
        negative-text="Cancel"
        @positive-click="chatStore.clearHistoryMessages"
        @positive-click="chatStore.clearHistoryMessages('voice')"
        @negative-click="() => {}"
      >
        <template #icon>