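"""Knowledge-base Q&A service built on FastAPI.

Provides a RAG endpoint (Elasticsearch hybrid search plus an Aliyun LLM),
a Markdown-to-Word export endpoint backed by pandoc, and a small SSE
streaming demo endpoint.
"""
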
import asyncio
import logging
import os
import subprocess
import tempfile
import urllib.parse
import uuid
import warnings
from contextlib import asynccontextmanager
from io import BytesIO
from logging.handlers import RotatingFileHandler

import fastapi
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from starlette.staticfiles import StaticFiles

from Util.ALiYunUtil import ALiYunUtil
# Star import supplies queryByEs and callLLM used below.
from Util.SearchUtil import *

# Initialize logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Configure log handlers
log_file = os.path.join(os.path.dirname(__file__), 'Logs', 'app.log')
os.makedirs(os.path.dirname(log_file), exist_ok=True)

# File handler (rotates at 1 MB, keeps 5 backups)
file_handler = RotatingFileHandler(
    log_file, maxBytes=1024 * 1024, backupCount=5, encoding='utf-8')
file_handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

# Console handler
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

logger.addHandler(file_handler)
logger.addHandler(console_handler)


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Initialize the Aliyun LLM utility
    app.state.aliyun_util = ALiYunUtil()

    # Suppress HTTPS-related warnings
    warnings.filterwarnings('ignore', message='Connecting to .* using TLS with verify_certs=False is insecure')
    warnings.filterwarnings('ignore', message='Unverified HTTPS request is being made to host')
    yield


app = FastAPI(lifespan=lifespan)

# Mount the static files directory
app.mount("/static", StaticFiles(directory="Static"), name="static")


@app.post("/api/save-word")
async def save_to_word(request: fastapi.Request):
    temp_md = None
    output_file = None
    try:
        # Parse request data
        try:
            data = await request.json()
            markdown_content = data.get('markdown_content', '')
            if not markdown_content:
                raise ValueError("Empty Markdown content")
        except Exception as e:
            logger.error(f"Request parsing failed: {str(e)}")
            raise HTTPException(status_code=400, detail=f"Invalid request: {str(e)}")

        # Create a temporary Markdown file
        temp_md = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex + ".md")
        with open(temp_md, "w", encoding="utf-8") as f:
            f.write(markdown_content)

        # Convert with pandoc
        output_file = os.path.join(tempfile.gettempdir(), "【理想大模型】问答.docx")
        subprocess.run(['pandoc', temp_md, '-o', output_file, '--resource-path=static'], check=True)

        # Read the generated Word file
        with open(output_file, "rb") as f:
            stream = BytesIO(f.read())

        # Return the response
        encoded_filename = urllib.parse.quote("【理想大模型】问答.docx")
        return StreamingResponse(
            stream,
            media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            headers={"Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"})

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
    finally:
        # Clean up temporary files
        try:
            if temp_md and os.path.exists(temp_md):
                os.remove(temp_md)
            if output_file and os.path.exists(output_file):
                os.remove(output_file)
        except Exception as e:
            logger.warning(f"Failed to clean up temp files: {str(e)}")


@app.post("/api/rag", response_model=None)
async def rag(request: fastapi.Request):
    data = await request.json()
    query = data.get('query', '')
    query_tags = data.get('tags', [])

    # Hybrid search against Elasticsearch
    search_results = queryByEs(query, query_tags, logger)

    # Call the LLM
    markdown_content = callLLM(request, query, search_results, logger, False)

    # Return the answer if one was produced
    if markdown_content:
        return {"data": markdown_content, "format": "markdown"}

    return {"data": "没有在知识库中找到相关的信息,无法回答此问题。"}


@app.post("/api/helloWorld", response_model=None)
async def helloWorld(request: fastapi.Request):
    async def generate_hello_world():
        # Stream the message one character at a time as SSE events
        message = "Hello,World,"
        for char in message:
            yield f"data: {char}\n\n"
            await asyncio.sleep(0.5)

    return StreamingResponse(generate_hello_world(), media_type="text/event-stream")


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
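
# Illustrative requests (assumes the server above is running locally on
# port 8000; paths and JSON fields match the handlers in this module):
#
#   curl -X POST http://localhost:8000/api/rag \
#        -H "Content-Type: application/json" \
#        -d '{"query": "...", "tags": []}'
#
#   curl -X POST http://localhost:8000/api/save-word \
#        -H "Content-Type: application/json" \
#        -d '{"markdown_content": "# Title"}' \
#        -o answer.docx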