|
|
|
@ -1,4 +1,5 @@
|
|
|
|
|
import os
|
|
|
|
|
import subprocess
|
|
|
|
|
import tempfile
|
|
|
|
|
import urllib.parse
|
|
|
|
|
import uuid
|
|
|
|
@ -9,27 +10,18 @@ from typing import List
|
|
|
|
|
|
|
|
|
|
import jieba # 导入 jieba 分词库
|
|
|
|
|
import uvicorn
|
|
|
|
|
from docx import Document
|
|
|
|
|
from fastapi import FastAPI, Request, HTTPException
|
|
|
|
|
from fastapi.staticfiles import StaticFiles
|
|
|
|
|
from gensim.models import KeyedVectors
|
|
|
|
|
from openai import OpenAI
|
|
|
|
|
from pydantic import BaseModel, Field, ValidationError
|
|
|
|
|
from starlette.responses import StreamingResponse
|
|
|
|
|
|
|
|
|
|
from Config import Config
|
|
|
|
|
from Config.Config import MS_MODEL_PATH, MS_MODEL_LIMIT, MS_HOST, MS_PORT, MS_MAX_CONNECTIONS, MS_NPROBE, \
|
|
|
|
|
MS_COLLECTION_NAME
|
|
|
|
|
from Milvus.Utils.MilvusCollectionManager import MilvusCollectionManager
|
|
|
|
|
from Milvus.Utils.MilvusConnectionPool import *
|
|
|
|
|
from Milvus.Utils.MilvusConnectionPool import MilvusConnectionPool
|
|
|
|
|
import subprocess
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Convert an HTML file to a Word document by shelling out to pandoc.
# NOTE(review): this function is defined a second time later in this file;
# the later binding wins at import time — one copy should be removed.
def html_to_word_pandoc(html_file, output_file):
    """Convert *html_file* to *output_file* (e.g. .docx) using the pandoc CLI.

    Args:
        html_file: path of the source HTML file.
        output_file: path of the Word document to write.

    Raises:
        subprocess.CalledProcessError: if pandoc exits non-zero (the original
            version silently ignored conversion failures).
        FileNotFoundError: if pandoc is not installed or not on PATH.
    """
    # List-form argv (shell=False) avoids shell injection via file names;
    # check=True turns a silent pandoc failure into an explicit error.
    subprocess.run(['pandoc', html_file, '-o', output_file], check=True)
|
|
|
|
|
|
|
|
|
|
from Util.ALiYunUtil import ALiYunUtil
|
|
|
|
|
|
|
|
|
|
# 初始化日志
|
|
|
|
|
# Module-level logger; handlers/level are configured elsewhere in the file
# (the configuration lines are elided from this chunk).
logger = logging.getLogger(__name__)
|
|
|
|
@ -42,6 +34,10 @@ logger.addHandler(handler)
|
|
|
|
|
# Load pretrained word2vec vectors in text format (binary=False), capped at
# MS_MODEL_LIMIT entries to bound memory use. Loaded once at import time.
model = KeyedVectors.load_word2vec_format(MS_MODEL_PATH, binary=False, limit=MS_MODEL_LIMIT)
logger.info(f"模型加载成功,词向量维度: {model.vector_size}")
|
|
|
|
|
|
|
|
|
|
# Convert an HTML file to a Word document by shelling out to pandoc.
# NOTE(review): duplicate of an identical definition earlier in this file
# (merge residue); this later binding is the one that survives import.
def html_to_word_pandoc(html_file, output_file):
    """Convert *html_file* to *output_file* (e.g. .docx) using the pandoc CLI.

    Args:
        html_file: path of the source HTML file.
        output_file: path of the Word document to write.

    Raises:
        subprocess.CalledProcessError: if pandoc exits non-zero (the original
            version silently ignored conversion failures).
        FileNotFoundError: if pandoc is not installed or not on PATH.
    """
    # List-form argv (shell=False) avoids shell injection via file names;
    # check=True turns a silent pandoc failure into an explicit error.
    subprocess.run(['pandoc', html_file, '-o', output_file], check=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: build shared clients at startup, release them at shutdown.

    NOTE(review): app.state.milvus_pool is closed below but its creation sits in
    lines elided from this chunk — confirm the pool is initialized before the
    collection manager is used.
    """
    # Load the Milvus collection used for retrieval.
    app.state.collection_manager = MilvusCollectionManager(MS_COLLECTION_NAME)
    app.state.collection_manager.load_collection()

    # DeepSeek client (OpenAI-compatible endpoint).
    app.state.deepseek_client = OpenAI(
        api_key=Config.DEEPSEEK_API_KEY,
        base_url=Config.DEEPSEEK_URL
    )

    # Aliyun LLM helper.
    # Fix: the original body (merge residue) contained TWO `yield` statements;
    # an @asynccontextmanager generator must yield exactly once, otherwise the
    # second yield raises RuntimeError at shutdown and the code between the two
    # yields (this initialization) never ran at startup. All initialization now
    # happens before the single yield.
    app.state.aliyun_util = ALiYunUtil()

    yield

    # Shutdown: release the Milvus connection pool.
    app.state.milvus_pool.close()
|
|
|
|
|
|
|
|
|
@ -154,17 +147,9 @@ async def generate_stream(client, milvus_pool, collection_manager, query, docume
|
|
|
|
|
5. 确保内容结构清晰,便于前端展示
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
# Ask the LLM to format the retrieved documents into an HTML report.
# stream=False: the full completion is returned in one response object,
# despite this generator otherwise being a streaming interface.
response = client.chat.completions.create(
model="deepseek-chat",
messages=[
{"role": "system", "content": "你是一个专业的文档整理助手"},
{"role": "user", "content": prompt}
],
temperature=0.3,
stream=False
)
# NOTE(review): merge residue — the DeepSeek branch above and the Aliyun
# branch below BOTH execute, so every request yields two answers, and
# `client` cannot simultaneously be an OpenAI client
# (.chat.completions.create) and an Aliyun util (.chat). Keep exactly one
# branch. (The original comment here said "save the returned HTML to a
# file", but nothing is written to disk — confirm intent.)
yield {"data": response.choices[0].message.content}
# Call the Aliyun LLM with the same prompt.
html_content = client.chat(prompt)
yield {"data": html_content}
except Exception as e:
# Surface generation errors to the consumer as an ordinary data chunk
# rather than aborting the stream.
yield {"data": f"生成报告时出错: {str(e)}"}
|
|
|
|
|
finally:
|
|
|
|
@ -255,9 +240,9 @@ async def rag_stream(request: Request):
|
|
|
|
|
except Exception as e:
|
|
|
|
|
logger.error(f"请求解析失败: {str(e)}")
|
|
|
|
|
raise HTTPException(status_code=400, detail="无效的请求格式")
|
|
|
|
|
"""RAG+DeepSeek接口"""
|
|
|
|
|
"""RAG+ALiYun接口"""
|
|
|
|
|
async for chunk in generate_stream(
|
|
|
|
|
request.app.state.deepseek_client,
|
|
|
|
|
request.app.state.aliyun_util,
|
|
|
|
|
request.app.state.milvus_pool,
|
|
|
|
|
request.app.state.collection_manager,
|
|
|
|
|
query_request.query,
|
|
|
|
|