import { defineStore } from "pinia";
import { ref, watch } from "vue";
import type {
  IChatWithLLMRequest,
  ModelInfo,
  ModelListInfo,
  UsageInfo
} from "@/interfaces";
import { ChatService } from "@/services";

export const useChatStore = defineStore("chat", () => {
  const token = "sk-fkGVZBrAqvIxLjlF3b5f19EfBb63486c90Fa5a1fBd7076Ee";
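  // NOTE: hardcoding the token ships it in every client bundle; loading it
  // from an env variable (e.g. import.meta.env.VITE_API_TOKEN, name assumed)
  // would keep it out of the source.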

  // Default model
  const modelInfo = ref<ModelInfo | null>(null);

  // Chat history
  const historyMessages = ref<IChatWithLLMRequest["messages"]>([]);

  // Whether a response is currently streaming
  const completing = ref<boolean>(false);

  // Whether the model is emitting chain-of-thought ("thinking") content
  const thinking = ref<boolean>(false);

  // Online user count
  const onlineCount = ref<number>(0);

  // Chat with the LLM
  const chatWithLLM = async (
    request: IChatWithLLMRequest,
    onProgress: (content: string) => void, // receives streamed content updates
    getUsageInfo: (object: UsageInfo) => void = () => {}, // receives usage info
    getThinking: (thinkingContent: string) => void = () => {} // receives chain-of-thought content
  ) => {
    if (completing.value) throw new Error("A response is already in progress");

    completing.value = true; // request started
    try {
      await ChatService.ChatWithLLM(
        token,
        request,
        onProgress,
        getUsageInfo,
        getThinking
      );
    } catch (error) {
      console.error("Request failed:", error);
    } finally {
      completing.value = false;
    }
  };
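
  // Direct-call sketch (illustrative only; in this store the watcher below
  // normally drives requests):
  //
  //   await chatWithLLM(
  //     { messages: historyMessages.value, model: modelInfo.value!.model_id },
  //     (content) => console.log("partial reply:", content)
  //   );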

  // Add a message to the chat history
  const addMessageToHistory = (message: string) => {
    const content = message.trim();
    if (!content) return;

    historyMessages.value.push({
      role: "user",
      content
    });
  };

  // Clear the chat history
  const clearHistoryMessages = () => {
    historyMessages.value = [];
  };

  // Ensure the last message is an assistant message; if it isn't, push an
  // empty placeholder, otherwise the chain-of-thought content that follows
  // would have nowhere to go and be lost
  const ensureAssistantMessage = () => {
    if (
      historyMessages.value.length === 0 ||
      historyMessages.value[historyMessages.value.length - 1].role !==
        "assistant"
    ) {
      historyMessages.value.push({
        role: "assistant",
        content: ""
      });
    }
  };

  watch(
    historyMessages,
    (newVal) => {
      // When the history changes, send a request
      if (newVal.length > 0) {
        const lastMessage = newVal[newVal.length - 1];
        if (lastMessage.role === "user" && modelInfo.value) {
          chatWithLLM(
            {
              messages: newVal,
              model: modelInfo.value.model_id
            },
            // Progress callback: streamed reply text
            (content) => {
              ensureAssistantMessage();
              thinking.value = false;
              historyMessages.value[historyMessages.value.length - 1].content =
                content;
            },
            // Usage-info callback
            (usageInfo: UsageInfo) => {
              // If the last message is the assistant's reply, attach the usage info
              if (
                historyMessages.value.length > 0 &&
                historyMessages.value[historyMessages.value.length - 1].role ===
                  "assistant"
              ) {
                historyMessages.value[historyMessages.value.length - 1].usage =
                  usageInfo;
              }
            },
            // Chain-of-thought callback
            (thinkingContent: string) => {
              ensureAssistantMessage();
              thinking.value = true;
              historyMessages.value[
                historyMessages.value.length - 1
              ].thinking = thinkingContent;
            }
          )
            .then(() => {
              historyMessages.value[historyMessages.value.length - 1].id =
                new Date().getTime().toString();
            })
            .catch((error) => {
              // chatWithLLM rejects if a response is already in progress
              console.error(error);
            });
        }
      }
    },
    { deep: true }
  );
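
  // Note: the streaming callbacks above mutate historyMessages, which re-fires
  // this deep watcher; the `lastMessage.role === "user"` guard is what stops
  // those re-entrant runs from issuing a second request.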

  // Model list
  const modelList = ref<ModelListInfo[]>([]);

  // Fetch the model list
  const getModelList = async () => {
    try {
      const response = await ChatService.GetModelList();
      modelList.value = response.data.data;
    } catch (error) {
      console.error("Failed to fetch the model list:", error);
    }
  };

  return {
    token,
    completing,
    chatWithLLM,
    historyMessages,
    addMessageToHistory,
    clearHistoryMessages,
    getModelList,
    modelList,
    modelInfo,
    onlineCount,
    thinking
  };
});
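
// Usage sketch (illustrative only; assumes a component `<script setup>`
// context and that ModelListInfo is assignable to ModelInfo):
//
//   const chat = useChatStore();
//   await chat.getModelList();
//   chat.modelInfo = chat.modelList[0] ?? null;
//   chat.addMessageToHistory("Hello!"); // the deep watcher sends the request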