You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

183 lines
6.1 KiB

3 weeks ago
import asyncio
import json
import logging
import os
import subprocess
import tempfile
import urllib.parse
import uuid
import warnings
from contextlib import asynccontextmanager
from io import BytesIO
from logging.handlers import RotatingFileHandler

import fastapi
import uvicorn
from fastapi import FastAPI, HTTPException
from openai import AsyncOpenAI
from sse_starlette import EventSourceResponse
from starlette.responses import StreamingResponse
from starlette.staticfiles import StaticFiles

from Config import Config
from Util.ALiYunUtil import ALiYunUtil
from Util.SearchUtil import *
4 weeks ago
# 初始化日志
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
4 weeks ago
# 配置日志处理器
log_file = os.path.join(os.path.dirname(__file__), 'Logs', 'app.log')
os.makedirs(os.path.dirname(log_file), exist_ok=True)
# 文件处理器
file_handler = RotatingFileHandler(
3 weeks ago
log_file, maxBytes=1024 * 1024, backupCount=5, encoding='utf-8')
4 weeks ago
file_handler.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# 控制台处理器
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(file_handler)
logger.addHandler(console_handler)
4 weeks ago
async def lifespan(app: FastAPI):
# 初始化阿里云大模型工具
app.state.aliyun_util = ALiYunUtil()
3 weeks ago
4 weeks ago
# 抑制HTTPS相关警告
warnings.filterwarnings('ignore', message='Connecting to .* using TLS with verify_certs=False is insecure')
warnings.filterwarnings('ignore', message='Unverified HTTPS request is being made to host')
4 weeks ago
yield
4 weeks ago
4 weeks ago
app = FastAPI(lifespan=lifespan)
# 挂载静态文件目录
app.mount("/static", StaticFiles(directory="Static"), name="static")
@app.post("/api/save-word")
3 weeks ago
async def save_to_word(request: fastapi.Request):
4 weeks ago
output_file = None
try:
# Parse request data
try:
data = await request.json()
3 weeks ago
markdown_content = data.get('markdown_content', '')
if not markdown_content:
raise ValueError("Empty MarkDown content")
4 weeks ago
except Exception as e:
logger.error(f"Request parsing failed: {str(e)}")
raise HTTPException(status_code=400, detail=f"Invalid request: {str(e)}")
3 weeks ago
# 创建临时Markdown文件
temp_md = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex + ".md")
with open(temp_md, "w", encoding="utf-8") as f:
3 weeks ago
f.write(markdown_content)
4 weeks ago
# 使用pandoc转换
4 weeks ago
output_file = os.path.join(tempfile.gettempdir(), "【理想大模型】问答.docx")
3 weeks ago
subprocess.run(['pandoc', temp_md, '-o', output_file, '--resource-path=static'], check=True)
4 weeks ago
# 读取生成的Word文件
with open(output_file, "rb") as f:
stream = BytesIO(f.read())
# 返回响应
4 weeks ago
encoded_filename = urllib.parse.quote("【理想大模型】问答.docx")
4 weeks ago
return StreamingResponse(
stream,
media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
headers={"Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"})
except HTTPException:
raise
except Exception as e:
logger.error(f"Unexpected error: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
finally:
# 清理临时文件
try:
3 weeks ago
if temp_md and os.path.exists(temp_md):
os.remove(temp_md)
4 weeks ago
if output_file and os.path.exists(output_file):
os.remove(output_file)
except Exception as e:
logger.warning(f"Failed to clean up temp files: {str(e)}")
3 weeks ago
@app.post("/api/rag", response_model=None)
async def rag(request: fastapi.Request):
3 weeks ago
data = await request.json()
query = data.get('query', '')
query_tags = data.get('tags', [])
# 调用es进行混合搜索
3 weeks ago
search_results = queryByEs(query, query_tags, logger)
3 weeks ago
3 weeks ago
# 调用大模型
3 weeks ago
markdown_content = callLLM(request, query, search_results, logger, False)
3 weeks ago
# 如果有正确的结果
3 weeks ago
if markdown_content:
return {"data": markdown_content, "format": "markdown"}
3 weeks ago
return {"data": "没有在知识库中找到相关的信息,无法回答此问题。"}
4 weeks ago
3 weeks ago
@app.post("/api/rag_stream", response_model=None)
async def rag_stream(request: fastapi.Request):
data = await request.json()
query = data.get('query', '')
query_tags = data.get('tags', [])
# 调用es进行混合搜索
search_results = queryByEs(query, query_tags, logger)
# 流式调用大模型
# 获取StreamingResponse对象
return callLLM(request, query, search_results, logger, True)
# 与用户交流聊天
@app.post("/api/helloWorld")
async def reply():
# 初始化异步 OpenAI 客户端
client = AsyncOpenAI(
api_key=Config.MODEL_API_KEY,
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
async def generate_response_stream():
try:
# 流式调用大模型
stream = await client.chat.completions.create(
model=Config.MODEL_NAME,
messages=[
{"role": "system",
"content": "你是聊天人的好朋友,你认识深刻,知识渊博,不要使用哎呀这样的语气词。聊天的回复内容不要超过150字。"},
{"role": "user", "content": "你是谁?"}
],
max_tokens=4000,
stream=True # 启用流式模式
)
# 流式返回模型生成的回复
async for chunk in stream:
if chunk.choices[0].delta.content:
yield f"data: {json.dumps({'reply': chunk.choices[0].delta.content}, ensure_ascii=False)}\n\n"
except Exception as e:
yield f"data: {json.dumps({'error': str(e)})}\n\n"
return EventSourceResponse(generate_response_stream())
3 weeks ago
4 weeks ago
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)