2025-08-19 07:34:39 +08:00
|
|
|
|
import json
|
2025-08-19 10:51:04 +08:00
|
|
|
|
import logging
|
2025-08-19 07:34:39 +08:00
|
|
|
|
import uuid
|
2025-08-19 10:51:04 +08:00
|
|
|
|
from datetime import datetime
|
2025-08-19 07:34:39 +08:00
|
|
|
|
|
|
|
|
|
import fastapi
|
|
|
|
|
import uvicorn
|
2025-08-19 10:52:13 +08:00
|
|
|
|
from fastapi import FastAPI, HTTPException
|
2025-08-19 07:34:39 +08:00
|
|
|
|
from openai import AsyncOpenAI
|
|
|
|
|
from sse_starlette import EventSourceResponse
|
|
|
|
|
|
|
|
|
|
from Config import Config
|
2025-08-19 10:51:04 +08:00
|
|
|
|
from ElasticSearch.Utils.EsSearchUtil import EsSearchUtil
|
2025-08-19 07:34:39 +08:00
|
|
|
|
|
|
|
|
|
# --- Module-level wiring ---

# Logger for this module, pinned to INFO.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Async OpenAI-compatible client pointed at the Aliyun LLM endpoint
# configured in the project's Config.
client = AsyncOpenAI(
    base_url=Config.ALY_LLM_BASE_URL,
    api_key=Config.ALY_LLM_API_KEY,
)

# Elasticsearch search helper built from the project ES settings.
search_util = EsSearchUtil(Config.ES_CONFIG)
|
def get_system_prompt():
    """Return the system prompt that frames the model as a guiding tutor.

    The prompt (in Chinese) instructs the model to assess the student first,
    build on prior knowledge, guide rather than lecture, and never hand out
    answers directly.
    """
    prompt_text = """
你是一位平易近人且教学方法灵活的教师,通过引导学生自主学习来帮助他们掌握知识。

严格遵循以下教学规则:
1. 首先了解学生情况:在开始讲解前,询问学生的年级水平和对询问知识的了解程度。
2. 基于现有知识构建:将新思想与学生已有的知识联系起来。
3. 引导而非灌输:使用问题、提示和小步骤,让学生自己发现答案。
4. 检查和强化:在讲解难点后,确认学生能够重述或应用这些概念。
5. 变化节奏:混合讲解、提问和互动活动,让教学像对话而非讲座。

最重要的是:不要直接给出答案,而是通过合作和基于学生已有知识的引导,帮助学生自己找到答案。
"""
    return prompt_text
|
2025-08-19 11:02:57 +08:00
|
|
|
|
async def lifespan(_: FastAPI):
    """Application lifespan hook: no startup/shutdown work yet — simply
    yields control for the app's running phase."""
    yield
|
|
|
|
|
|
|
|
|
|
# FastAPI application wired to the (currently no-op) lifespan handler.
app = FastAPI(lifespan=lifespan)
|
|
|
|
|
|
2025-08-19 10:52:03 +08:00
|
|
|
|
@app.post("/api/chat")
async def chat(request: fastapi.Request):
    """Stream an LLM tutoring answer for the user's query over SSE.

    Request JSON body:
        user_id: optional caller id (accepted but not yet used — no
                 persistence is implemented).
        query:   the user's question; required.

    Raises:
        HTTPException 400 when ``query`` is missing/empty,
        HTTPException 500 on any other failure before streaming starts.

    NOTE(review): a previous revision's docstring described keyword +
    vector retrieval, but no Elasticsearch lookup happens here
    (``search_util`` is unused) — confirm whether that step was
    intentionally removed.
    """
    try:
        data = await request.json()
        user_id = data.get('user_id', 'anonymous')  # kept for future persistence; unused
        query = data.get('query', '')

        if not query:
            raise HTTPException(status_code=400, detail="查询内容不能为空")

        # Compose the final prompt: tutoring system prompt + user question.
        system_prompt = get_system_prompt()

        prompt = f"""
{system_prompt.strip()}

用户现在的问题是: '{query}'
"""

        # Stream the model's answer chunk by chunk.
        async def generate_response_stream():
            """Yield SSE payloads with model deltas; emits an error payload on failure."""
            try:
                stream = await client.chat.completions.create(
                    model=Config.ALY_LLM_MODEL_NAME,
                    messages=[
                        {'role': 'user', 'content': prompt}
                    ],
                    max_tokens=8000,
                    stream=True
                )

                # Collect the full answer (intended for saving later; currently unused).
                full_answer = []
                async for chunk in stream:
                    delta = chunk.choices[0].delta.content
                    if delta:
                        full_answer.append(delta)
                        # BUGFIX: EventSourceResponse adds the "data: " prefix
                        # and the blank-line terminator itself; the old code
                        # yielded a pre-framed "data: ...\n\n" string, which
                        # double-framed every event on the wire. Yield only
                        # the JSON payload.
                        yield json.dumps({'reply': delta}, ensure_ascii=False)

            except Exception as e:
                # logger.exception also records the traceback.
                logger.exception(f"大模型调用失败: {str(e)}")
                # ensure_ascii=False for consistency with the success path.
                yield json.dumps({'error': f'生成回答失败: {str(e)}'}, ensure_ascii=False)

        return EventSourceResponse(generate_response_stream())

    except HTTPException as e:
        logger.error(f"聊天接口错误: {str(e.detail)}")
        # Bare raise preserves the original traceback.
        raise
    except Exception as e:
        logger.error(f"聊天接口异常: {str(e)}")
        raise HTTPException(status_code=500, detail=f"处理请求失败: {str(e)}")
|
|
|
|
|
|
|
|
2025-08-19 07:34:39 +08:00
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Run the service directly with uvicorn, listening on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)