You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

108 lines
3.5 KiB

2 weeks ago
import json
import logging
import os
from contextlib import asynccontextmanager
from logging.handlers import RotatingFileHandler

import fastapi
import uvicorn
from fastapi import FastAPI
from lightrag import LightRAG
from lightrag.kg.shared_storage import initialize_pipeline_status
from raganything import RAGAnything
from sse_starlette import EventSourceResponse
from starlette.staticfiles import StaticFiles

from Util.RagUtil import create_llm_model_func, create_vision_model_func, create_embedding_func
# Configure logging once at program start: root logger at INFO with a
# timestamped format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Dedicated handler for the 'lightrag' logger so its output can be
# controlled independently of the root configuration.
logger = logging.getLogger('lightrag')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
# Without this, every record is emitted twice: once by the handler above and
# once by the root handler that basicConfig() installed (records propagate
# up the logger hierarchy by default).
logger.propagate = False
@asynccontextmanager
async def lifespan(app: "FastAPI"):
    """Application lifespan: no startup/shutdown work, just yields control.

    FastAPI's ``lifespan=`` parameter expects an async *context manager*
    factory; a bare async generator function is rejected at startup, so the
    ``@asynccontextmanager`` decorator is required here.
    """
    yield
async def print_stream(stream):
    """Echo every non-empty chunk from an async stream to stdout, unbuffered."""
    async for piece in stream:
        if not piece:
            continue
        print(piece, end="", flush=True)
# Application instance; `lifespan` (defined above) performs no extra
# startup/shutdown work beyond yielding control.
app = FastAPI(lifespan=lifespan)
# Serve files from the local "Static" directory under the /static URL prefix.
app.mount("/static", StaticFiles(directory="Static"), name="static")
@app.post("/api/rag")
async def rag(request: fastapi.Request):
    """Answer a query against a per-topic knowledge base, streamed over SSE.

    Expects a JSON body with:
      - "topic": name of a knowledge-base directory under ./Topic (e.g. Chinese, Math)
      - "query": the question to answer

    Returns an ``EventSourceResponse`` that streams reply chunks (or an error
    payload) as JSON strings.
    """
    data = await request.json()

    topic = data.get("topic")  # e.g. Chinese, Math
    query = data.get("query")  # the question to run against the KB

    # `topic` is user-supplied and is joined into a filesystem path below, so
    # reject anything that could escape the ./Topic directory (path traversal).
    if not topic or ".." in topic or "/" in topic or "\\" in topic:
        return fastapi.responses.JSONResponse(
            status_code=400, content={"error": "invalid topic"}
        )

    # Per-topic working directory holding this knowledge base's storages.
    working_path = "./Topic/" + topic

    async def generate_response_stream(query: str):
        # Prompt addendum: suppress the references section and make the model
        # state explicitly when a question falls outside the knowledge base.
        user_prompt = "\n 1、不要输出参考资料 或者 References "
        user_prompt = user_prompt + "\n 2、如果问题与提供的知识库内容不符则明确告诉未在知识库范围内提到"

        lightrag_instance = None
        try:
            # Build the model / embedding callables for this request.
            llm_model_func = create_llm_model_func(history_messages=[])
            vision_model_func = create_vision_model_func(llm_model_func)
            embedding_func = create_embedding_func()

            lightrag_instance = LightRAG(
                working_dir=working_path,
                llm_model_func=llm_model_func,
                embedding_func=embedding_func
            )
            await lightrag_instance.initialize_storages()
            await initialize_pipeline_status()

            # Query through a request-local instance so concurrent requests
            # cannot clobber each other's state; app.state.rag is still set
            # for backward compatibility with any code that reads it.
            rag_instance = RAGAnything(
                lightrag=lightrag_instance,
                vision_model_func=vision_model_func,
            )
            app.state.rag = rag_instance

            resp = await rag_instance.aquery(
                user_prompt=user_prompt,
                query=query,
                mode="hybrid",
                stream=True
            )

            # NOTE(review): EventSourceResponse adds its own SSE "data:"
            # framing, so the explicit prefix here likely double-wraps the
            # payload; kept byte-identical because the client apparently
            # parses this exact shape — confirm before changing.
            async for chunk in resp:
                if not chunk:
                    continue
                yield f"data: {json.dumps({'reply': chunk})}\n\n"
                print(chunk, end='', flush=True)
        except Exception as e:
            yield f"data: {json.dumps({'error': str(e)})}\n\n"
            logger.error(f"处理查询时出错: {query}. 错误: {str(e)}")
        finally:
            # Finalize only storages that were actually initialized. The
            # previous code read app.state.rag here, which raised
            # AttributeError whenever the failure happened before that
            # attribute was assigned.
            if lightrag_instance is not None:
                await lightrag_instance.finalize_storages()

    return EventSourceResponse(generate_response_stream(query=query))
# Development entry point: serve on all interfaces, port 8000.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)