main
HuangHai 4 weeks ago
parent fe86c88161
commit 7fee5c212e

@@ -41,13 +41,14 @@ async def lifespan(app: FastAPI):
    # Initialize the Aliyun LLM utility
    app.state.aliyun_util = ALiYunUtil()
    yield


app = FastAPI(lifespan=lifespan)

# Mount the static files directory
app.mount("/static", StaticFiles(directory="Static"), name="static")


class QueryRequest(BaseModel):
    query: str = Field(..., description="The user's question")
    documents: List[str] = Field(..., description="Documents uploaded by the user")
@@ -114,10 +115,10 @@ async def rag_stream(request: Request):
        data = await request.json()
        query = data.get('query', '')
        query_tags = data.get('tags', [])

        # Get an EsSearchUtil instance
        es_search_util = EsSearchUtil(ES_CONFIG)

        # Perform the hybrid search
        es_conn = es_search_util.es_pool.get_connection()
        try:
@@ -125,13 +126,13 @@ async def rag_stream(request: Request):
            logger.info("\n=== Starting query execution ===")
            logger.info(f"Original query text: {query}")
            logger.info(f"Query tags: {query_tags}")

            logger.info("\n=== Vector search phase ===")
            logger.info("1. Tokenizing and vectorizing the query text...")
            query_embedding = es_search_util.text_to_embedding(query)
            logger.info(f"2. Generated query vector dimension: {len(query_embedding)}")
            logger.info(f"3. First 3 vector values: {query_embedding[:3]}")
            logger.info("4. Running the Elasticsearch vector search...")
            vector_results = es_conn.search(
                index=ES_CONFIG['index_name'],
@@ -160,7 +161,7 @@ async def rag_stream(request: Request):
                }
            )
            logger.info(f"5. Number of vector search results: {len(vector_results['hits']['hits'])}")

            # Exact text search
            logger.info("\n=== Exact text search phase ===")
            logger.info("1. Running the Elasticsearch exact text search...")
@@ -187,30 +188,30 @@ async def rag_stream(request: Request):
                }
            )
            logger.info(f"2. Number of text search results: {len(text_results['hits']['hits'])}")

            # Merge the results
            logger.info("\n=== Final search results ===")
            logger.info(f"Vector search results: {len(vector_results['hits']['hits'])}")
            for i, hit in enumerate(vector_results['hits']['hits'], 1):
                logger.info(f" {i}. Document ID: {hit['_id']}, similarity score: {hit['_score']:.2f}")
                logger.info(f" Content: {hit['_source']['user_input']}")

            logger.info("Exact text search results:")
            for i, hit in enumerate(text_results['hits']['hits']):
-                logger.info(f" {i+1}. Document ID: {hit['_id']}, match score: {hit['_score']:.2f}")
+                logger.info(f" {i + 1}. Document ID: {hit['_id']}, match score: {hit['_score']:.2f}")
                logger.info(f" Content: {hit['_source']['user_input']}")
            search_results = {
                "vector_results": [hit['_source'] for hit in vector_results['hits']['hits']],
                "text_results": [hit['_source'] for hit in text_results['hits']['hits']]
            }

            # Call the Aliyun LLM to consolidate the results
            aliyun_util = request.app.state.aliyun_util

            # Build the prompt
            context = "\n".join([
-                f"Result {i+1}: {res['tags']['full_content']}"
+                f"Result {i + 1}: {res['tags']['full_content']}"
                for i, res in enumerate(search_results['vector_results'] + search_results['text_results'])
            ])
@@ -237,23 +238,25 @@ async def rag_stream(request: Request):
            # Call the Aliyun LLM
            if len(context) > 0:
                # Ask the LLM to generate the answer
                logger.info("Calling the Aliyun LLM to generate the answer...")
                html_content = aliyun_util.chat(prompt)
                logger.info("Aliyun LLM answer generation completed successfully!")
                return {"data": html_content}
            else:
                logger.warning(f"No relevant data found for query '{query}', tags: {query_tags}")
-                return {"data": "No relevant information was found in the knowledge base, so this question cannot be answered.", "debug": {"query": query, "tags": query_tags}}
+                return {"data": "No relevant information was found in the knowledge base, so this question cannot be answered.",
+                        "debug": {"query": query, "tags": query_tags}}
        except Exception as e:
            return {"data": f"Error while generating the report: {str(e)}"}
        finally:
            es_search_util.es_pool.release_connection(es_conn)
    except Exception as e:
        logger.error(f"RAG search error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
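
A minimal client sketch, not part of the commit, showing how the endpoint above could be exercised once the service runs on port 8000. The route path "/rag_stream", the example query, and the requests dependency are assumptions; the handler itself only requires a JSON body with "query" and "tags", which it reads via data.get(...).

# Hypothetical client sketch (assumed route path and payload values)
import requests

resp = requests.post(
    "http://127.0.0.1:8000/rag_stream",                 # assumed route for rag_stream()
    json={"query": "What is hybrid search?", "tags": []},  # handler reads data.get('query') / data.get('tags')
    timeout=60,
)
resp.raise_for_status()
print(resp.json().get("data"))  # HTML answer from the Aliyun LLM, or the fallback message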
