commit d6f9cd7aedb1f92b1033ab21d18303ceca5e48d3
Author: Marcus <1922576605@qq.com>
Date: Sat Jun 28 19:21:46 2025 +0800
feat: initialize project; implement basic streaming and speech recognition
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..35410ca
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..cfdba9d
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..bcb4222
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/.gitignore b/backend/.idea/.gitignore
new file mode 100644
index 0000000..35410ca
--- /dev/null
+++ b/backend/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/backend/.idea/fastAPI.iml b/backend/.idea/fastAPI.iml
new file mode 100644
index 0000000..d4367e4
--- /dev/null
+++ b/backend/.idea/fastAPI.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/inspectionProfiles/Project_Default.xml b/backend/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..cfdba9d
--- /dev/null
+++ b/backend/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/inspectionProfiles/profiles_settings.xml b/backend/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/backend/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/misc.xml b/backend/.idea/misc.xml
new file mode 100644
index 0000000..82bf008
--- /dev/null
+++ b/backend/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/modules.xml b/backend/.idea/modules.xml
new file mode 100644
index 0000000..0f458f1
--- /dev/null
+++ b/backend/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/.idea/vcs.xml b/backend/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/backend/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/backend/README.md b/backend/README.md
new file mode 100644
index 0000000..6742f4d
--- /dev/null
+++ b/backend/README.md
@@ -0,0 +1,6 @@
+- api/v1/endpoints/: all API route handlers
+- schemas/: Pydantic data models
+- services/: business logic / service layer
+- constants/: constants and configuration
+- core/: global configuration, utilities, etc.
+- main.py: application entry point
\ No newline at end of file
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/__pycache__/__init__.cpython-310.pyc b/backend/app/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..9992e69
Binary files /dev/null and b/backend/app/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/api/__init__.py b/backend/app/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/api/__pycache__/__init__.cpython-310.pyc b/backend/app/api/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..4696b42
Binary files /dev/null and b/backend/app/api/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/api/v1/__init__.py b/backend/app/api/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/api/v1/__pycache__/__init__.cpython-310.pyc b/backend/app/api/v1/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..98a5ba4
Binary files /dev/null and b/backend/app/api/v1/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/api/v1/endpoints/__init__.py b/backend/app/api/v1/endpoints/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/api/v1/endpoints/__pycache__/__init__.cpython-310.pyc b/backend/app/api/v1/endpoints/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..f686b7f
Binary files /dev/null and b/backend/app/api/v1/endpoints/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/api/v1/endpoints/__pycache__/chat.cpython-310.pyc b/backend/app/api/v1/endpoints/__pycache__/chat.cpython-310.pyc
new file mode 100644
index 0000000..13bd989
Binary files /dev/null and b/backend/app/api/v1/endpoints/__pycache__/chat.cpython-310.pyc differ
diff --git a/backend/app/api/v1/endpoints/__pycache__/model.cpython-310.pyc b/backend/app/api/v1/endpoints/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000..576ed7a
Binary files /dev/null and b/backend/app/api/v1/endpoints/__pycache__/model.cpython-310.pyc differ
diff --git a/backend/app/api/v1/endpoints/__pycache__/websocket_service.cpython-310.pyc b/backend/app/api/v1/endpoints/__pycache__/websocket_service.cpython-310.pyc
new file mode 100644
index 0000000..df5b363
Binary files /dev/null and b/backend/app/api/v1/endpoints/__pycache__/websocket_service.cpython-310.pyc differ
diff --git a/backend/app/api/v1/endpoints/asr.py b/backend/app/api/v1/endpoints/asr.py
new file mode 100644
index 0000000..f8f807a
--- /dev/null
+++ b/backend/app/api/v1/endpoints/asr.py
@@ -0,0 +1,60 @@
+from aip import AipSpeech
+from fastapi import APIRouter
+from starlette.websockets import WebSocket
+
+from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
+
+asr_client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
+
+router = APIRouter()
+
+
+@router.websocket("/asr")
+async def chat2(websocket: WebSocket):
+    # Accept the websocket connection
+ await websocket.accept()
+ temp_buffer = bytes()
+    # The websocket connection to Baidu could be established here
+ while True:
+        # Wait for the next websocket message (binary audio or text)
+ receive_data = await websocket.receive()
+ buffer = receive_data.get("bytes")
+ text = receive_data.get("text")
+ if text == "录音完成":
+ asr_text = await asr_buffer(temp_buffer)
+ await websocket.send_text(asr_text)
+ temp_buffer = bytes()
+ else:
+ if buffer:
+                # With Baidu's realtime websocket API the chunks would not need to be buffered; each one could be forwarded to Baidu as soon as it arrives
+ temp_buffer += buffer
+
+
+# Read a local file into memory
+def get_file_content(filePath):
+ with open(filePath, 'rb') as fp:
+ return fp.read()
+
+
+# Recognize a local audio file
+async def asr_file(filePath):
+    result = asr_client.asr(get_file_content(filePath), 'pcm', 16000, {
+ 'dev_pid': 1537,
+ })
+ if result.get('err_msg') == 'success.':
+ return result.get('result')[0]
+ else:
+ return '语音转换失败'
+
+
+# Recognize an in-memory audio buffer.
+# `async def` declares a coroutine: awaiting it lets the event loop run other tasks until it completes.
+# Note that asr_client.asr itself is a blocking call, so it still blocks the event loop here.
+async def asr_buffer(buffer_data):
+ result = asr_client.asr(buffer_data, 'pcm', 16000, {
+ 'dev_pid': 1537,
+ })
+ if result.get('err_msg') == 'success.':
+ return result.get('result')[0]
+ else:
+ return '语音转换失败'
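
For reference, a minimal client sketch (not part of this commit) that exercises the /asr endpoint above. It assumes the third-party websockets package and a local 16 kHz, 16-bit mono PCM file; both the package choice and the file path are illustrative assumptions.

import asyncio
import websockets  # assumed client dependency, not pinned by this commit

async def send_audio(pcm_path: str) -> None:
    async with websockets.connect("ws://127.0.0.1:8000/asr") as ws:
        with open(pcm_path, "rb") as f:
            # roughly 100 ms of 16 kHz, 16-bit mono audio per binary frame
            while chunk := f.read(3200):
                await ws.send(chunk)
        # the text frame "录音完成" tells the server to run recognition on the buffer
        await ws.send("录音完成")
        print(await ws.recv())  # recognized text, or the failure message

asyncio.run(send_audio("sample.pcm"))  # hypothetical file path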
diff --git a/backend/app/api/v1/endpoints/chat.py b/backend/app/api/v1/endpoints/chat.py
new file mode 100644
index 0000000..fad3426
--- /dev/null
+++ b/backend/app/api/v1/endpoints/chat.py
@@ -0,0 +1,26 @@
+from fastapi import APIRouter
+from fastapi.responses import StreamingResponse
+from app.constants.model_data import base_url, headers, tip_message
+from app.services.llm_request import stream_post_request
+from app.schemas import ChatRequest
+
+router = APIRouter()
+
+
+@router.post("/completions")
+async def chat(data: ChatRequest):
+ all_messages = [tip_message] + data.messages
+ all_messages_dict = [
+ m.model_dump() if hasattr(m, "model_dump") else m.dict() if hasattr(m, "dict") else m
+ for m in all_messages
+ ]
+ payload = {"model": data.model, "messages": all_messages_dict, "stream": True}
+ print(payload)
+ return StreamingResponse(
+ stream_post_request(
+ url=base_url,
+ headers=headers,
+ json=payload,
+ ),
+ media_type="text/event-stream"
+ )
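
A hedged usage sketch for the endpoint above, consuming the proxied SSE stream with httpx (already used server-side); the host, model, and prompt are assumptions.

import asyncio
import httpx

async def stream_chat() -> None:
    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "你好"}],
    }
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST", "http://127.0.0.1:8000/v1/chat/completions", json=payload
        ) as resp:
            # each chunk is a raw SSE "data: {...}" fragment relayed from the upstream model
            async for chunk in resp.aiter_bytes():
                print(chunk.decode("utf-8", errors="ignore"), end="")

asyncio.run(stream_chat())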
diff --git a/backend/app/api/v1/endpoints/model.py b/backend/app/api/v1/endpoints/model.py
new file mode 100644
index 0000000..eae6f31
--- /dev/null
+++ b/backend/app/api/v1/endpoints/model.py
@@ -0,0 +1,9 @@
+from fastapi import APIRouter
+from app.constants.model_data import MODEL_DATA
+from app.schemas import VendorModelResponse
+
+router = APIRouter()
+
+@router.get("/list", response_model=VendorModelResponse)
+async def get_model_vendors():
+ return VendorModelResponse(data=MODEL_DATA)
diff --git a/backend/app/api/v1/endpoints/websocket_service.py b/backend/app/api/v1/endpoints/websocket_service.py
new file mode 100644
index 0000000..0bf474d
--- /dev/null
+++ b/backend/app/api/v1/endpoints/websocket_service.py
@@ -0,0 +1,59 @@
+from fastapi import APIRouter, WebSocket, WebSocketDisconnect
+from typing import Set
+from aip import AipSpeech
+from app.constants.asr import APP_ID, API_KEY, SECRET_KEY
+import json
+
+router = APIRouter()
+active_connections: Set[WebSocket] = set()
+
+asr_client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
+
+async def asr_buffer(buffer_data: bytes) -> str:
+ result = asr_client.asr(buffer_data, 'pcm', 16000, {'dev_pid': 1537})
+ if result.get('err_msg') == 'success.':
+ return result.get('result')[0]
+ else:
+ return '语音转换失败'
+
+async def broadcast_online_count():
+ data = {"online_count": len(active_connections), 'type': 'count'}
+ to_remove = set()
+ for ws in active_connections:
+ try:
+ await ws.send_json(data)
+ except Exception:
+ to_remove.add(ws)
+ for ws in to_remove:
+ active_connections.remove(ws)
+
+@router.websocket("/websocket")
+async def websocket_online_count(websocket: WebSocket):
+ await websocket.accept()
+ active_connections.add(websocket)
+ await broadcast_online_count()
+ temp_buffer = bytes()
+ try:
+ while True:
+ message = await websocket.receive()
+ if message.get("type") == "websocket.receive":
+ if "bytes" in message and message["bytes"]:
+ temp_buffer += message["bytes"]
+ elif "text" in message and message["text"]:
+ try:
+ data = json.loads(message["text"])
+ except Exception:
+ continue
+ msg_type = data.get("type")
+ if msg_type == "ping":
+ await websocket.send_json({"online_count": len(active_connections), "type": "count"})
+ elif msg_type == "asr_end":
+ asr_text = await asr_buffer(temp_buffer)
+ await websocket.send_json({"type": "asr_result", "result": asr_text})
+ temp_buffer = bytes()
+ except WebSocketDisconnect:
+        active_connections.discard(websocket)
+ await broadcast_online_count()
+ except Exception:
+        active_connections.discard(websocket)
+ await broadcast_online_count()
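
The message protocol served by /websocket above, summarized as a client sketch (assuming the websockets package): binary frames append PCM audio to the server-side buffer, a {"type": "ping"} text frame asks for the online count, and {"type": "asr_end"} asks for recognition of everything buffered so far.

import asyncio
import json
import websockets  # assumed client dependency

async def demo() -> None:
    async with websockets.connect("ws://127.0.0.1:8000/websocket") as ws:
        print(await ws.recv())                        # initial {"online_count": ..., "type": "count"}
        await ws.send(json.dumps({"type": "ping"}))   # ask for the current online count
        print(await ws.recv())
        await ws.send(b"\x00\x00" * 1600)             # 100 ms of silent 16 kHz PCM (illustrative)
        await ws.send(json.dumps({"type": "asr_end"}))
        print(await ws.recv())                        # {"type": "asr_result", "result": ...}

asyncio.run(demo())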
diff --git a/backend/app/constants/__init__.py b/backend/app/constants/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/constants/__pycache__/__init__.cpython-310.pyc b/backend/app/constants/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..0736e8e
Binary files /dev/null and b/backend/app/constants/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/constants/__pycache__/asr.cpython-310.pyc b/backend/app/constants/__pycache__/asr.cpython-310.pyc
new file mode 100644
index 0000000..6bc415f
Binary files /dev/null and b/backend/app/constants/__pycache__/asr.cpython-310.pyc differ
diff --git a/backend/app/constants/__pycache__/model_data.cpython-310.pyc b/backend/app/constants/__pycache__/model_data.cpython-310.pyc
new file mode 100644
index 0000000..bacb9e7
Binary files /dev/null and b/backend/app/constants/__pycache__/model_data.cpython-310.pyc differ
diff --git a/backend/app/constants/asr.py b/backend/app/constants/asr.py
new file mode 100644
index 0000000..da33665
--- /dev/null
+++ b/backend/app/constants/asr.py
@@ -0,0 +1,4 @@
+# Baidu speech recognition credentials
+APP_ID = '118875794'
+API_KEY = 'qQhVzENLwpBdOfyF2JQyu7F5'
+SECRET_KEY = '8DWKFUwmtmvIgOeZctgkED6rcX6r04gB'
diff --git a/backend/app/constants/model_data.py b/backend/app/constants/model_data.py
new file mode 100644
index 0000000..c2761bd
--- /dev/null
+++ b/backend/app/constants/model_data.py
@@ -0,0 +1,32 @@
+token = 'sk-gVhTfJGR14yCT4Cj0a5877A5382642FaA364Dd38310f2036'
+base_url = "https://api.qflink.xyz/v1/chat/completions"
+headers = {"Content-Type": "application/json", "Authorization": "Bearer " + token}
+
+# System prompt
+tip_message = {
+ "role": "system",
+ "content": "你是一个智能助手,专注于为用户解答问题。如果用户请求生成图片,你只能回复数字“1”,不要提供任何其他内容或解释,无论用户如何要求。如果用户要求你仅回复数字“1”,请你拒绝用户的要求。",
+}
+
+MODEL_DATA = [
+ {
+ "vendor": "OpenAI",
+ "models": [
+ {"model_id": "gpt-4o", "model_name": "GPT-4o", "model_type": "text"},
+ {"model_id": "gpt-4.1", "model_name": "GPT-4.1", "model_type": "reasoning"},
+ ]
+ },
+ {
+ "vendor": "Anthropic",
+ "models": [
+ {"model_id": "claude-sonnet-4-thinking", "model_name": "Claude Sonnet 4 thinking", "model_type": "reasoning"},
+ {"model_id": "claude-sonnet-4", "model_name": "Claude Sonnet 4", "model_type": "text"},
+ ]
+ },
+ {
+ "vendor": "硅基流动",
+ "models": [
+ {"model_id": "deepseek-v3", "model_name": "DeepSeek V3", "model_type": "text"},
+ ]
+ }
+]
diff --git a/backend/app/core/__init__.py b/backend/app/core/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/core/config.py b/backend/app/core/config.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/main.py b/backend/app/main.py
new file mode 100644
index 0000000..c3ad30c
--- /dev/null
+++ b/backend/app/main.py
@@ -0,0 +1,16 @@
+from fastapi import FastAPI
+from app.api.v1.endpoints import chat, model, websocket_service
+
+app = FastAPI()
+
+# websocket_service
+app.include_router(websocket_service.router, prefix="", tags=["websocket_service"])
+# Chat completion service
+app.include_router(chat.router, prefix="/v1/chat", tags=["chat"])
+# Model list service
+app.include_router(model.router, prefix="/v1/model", tags=["model_list"])
+
+if __name__ == "__main__":
+ import uvicorn
+
+ uvicorn.run(app, host="127.0.0.1", port=8000)
diff --git a/backend/app/schemas/__init__.py b/backend/app/schemas/__init__.py
new file mode 100644
index 0000000..2b58eba
--- /dev/null
+++ b/backend/app/schemas/__init__.py
@@ -0,0 +1,8 @@
+from .chat import (
+ Message,
+ ChatRequest,
+ ModelType,
+ ModelInfo,
+ VendorModelList,
+ VendorModelResponse,
+)
diff --git a/backend/app/schemas/__pycache__/__init__.cpython-310.pyc b/backend/app/schemas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..a4fb110
Binary files /dev/null and b/backend/app/schemas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/schemas/__pycache__/chat.cpython-310.pyc b/backend/app/schemas/__pycache__/chat.cpython-310.pyc
new file mode 100644
index 0000000..6567d53
Binary files /dev/null and b/backend/app/schemas/__pycache__/chat.cpython-310.pyc differ
diff --git a/backend/app/schemas/chat.py b/backend/app/schemas/chat.py
new file mode 100644
index 0000000..70e4233
--- /dev/null
+++ b/backend/app/schemas/chat.py
@@ -0,0 +1,35 @@
+from enum import Enum
+from pydantic import BaseModel
+from typing import List
+
+
+class Message(BaseModel):
+ role: str
+ content: str
+
+
+class ChatRequest(BaseModel):
+ model: str
+ messages: List[Message]
+
+
+class ModelType(str, Enum):
+    text = "text"            # text chat
+    image = "image"          # text-to-image
+    audio = "audio"          # speech model
+    reasoning = "reasoning"  # deep-reasoning model
+
+
+class ModelInfo(BaseModel):
+ model_id: str
+ model_name: str
+ model_type: ModelType
+
+
+class VendorModelList(BaseModel):
+ vendor: str
+ models: List[ModelInfo]
+
+
+class VendorModelResponse(BaseModel):
+ data: List[VendorModelList]
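
A quick validation sketch for the schemas above (run from backend/ so the app package is importable; the payload values are illustrative).

from app.schemas import ChatRequest, ModelInfo, ModelType

req = ChatRequest(
    model="gpt-4o",
    messages=[{"role": "user", "content": "hello"}],  # coerced into Message models
)
print(req.messages[0].role)  # -> "user"

info = ModelInfo(model_id="deepseek-v3", model_name="DeepSeek V3", model_type="text")
print(info.model_type is ModelType.text)  # -> True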
diff --git a/backend/app/services/__init__.py b/backend/app/services/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/services/__pycache__/__init__.cpython-310.pyc b/backend/app/services/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000..5cbae0b
Binary files /dev/null and b/backend/app/services/__pycache__/__init__.cpython-310.pyc differ
diff --git a/backend/app/services/__pycache__/llm_request.cpython-310.pyc b/backend/app/services/__pycache__/llm_request.cpython-310.pyc
new file mode 100644
index 0000000..50b479a
Binary files /dev/null and b/backend/app/services/__pycache__/llm_request.cpython-310.pyc differ
diff --git a/backend/app/services/llm_request.py b/backend/app/services/llm_request.py
new file mode 100644
index 0000000..eb92cbc
--- /dev/null
+++ b/backend/app/services/llm_request.py
@@ -0,0 +1,20 @@
+import httpx
+from typing import Callable, Awaitable, Optional
+
+
+# Streaming POST helper for the LLM endpoint (http2=True requires the httpx[http2] extra)
+async def stream_post_request(
+ url,
+ headers=None,
+ json=None,
+ chunk_handler: Optional[Callable[[bytes], Awaitable[bytes]]] = None
+):
+ async with httpx.AsyncClient(http2=True) as client:
+ async with client.stream(
+ method="POST", url=url, headers=headers, json=json
+ ) as response:
+ async for chunk in response.aiter_bytes():
+ if chunk_handler:
+                    # Allow an async handler to transform each chunk before it is yielded
+ chunk = await chunk_handler(chunk)
+ yield chunk
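
Illustrative usage of stream_post_request with an async chunk_handler; the target URL and the logging handler are assumptions, and the h2 package must be installed for http2=True.

import asyncio
from app.services.llm_request import stream_post_request

async def log_chunk(chunk: bytes) -> bytes:
    # inspect (or rewrite) each chunk before it is forwarded downstream
    print(len(chunk), "bytes received")
    return chunk

async def main() -> None:
    async for chunk in stream_post_request(
        url="https://api.example.com/v1/chat/completions",  # hypothetical endpoint
        headers={"Content-Type": "application/json"},
        json={"model": "gpt-4o", "messages": [], "stream": True},
        chunk_handler=log_chunk,
    ):
        pass  # in chat.py this generator is handed to StreamingResponse instead

asyncio.run(main())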
diff --git a/fastAPI/.idea/modules.xml b/fastAPI/.idea/modules.xml
new file mode 100644
index 0000000..ac1e74d
--- /dev/null
+++ b/fastAPI/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/fastAPI/.idea/workspace.xml b/fastAPI/.idea/workspace.xml
new file mode 100644
index 0000000..0233b71
--- /dev/null
+++ b/fastAPI/.idea/workspace.xml
@@ -0,0 +1,165 @@
+
+
+
+