@@ -123,6 +123,36 @@ async def rag(request: fastapi.Request):
    return {"data": "No relevant information was found in the knowledge base, so this question cannot be answered."}


@app.post("/api/rag_stream", response_model=None)
async def rag_stream(request: fastapi.Request):
    data = await request.json()
    query = data.get('query', '')
    query_tags = data.get('tags', [])

    # Run a hybrid search against Elasticsearch
    search_results = queryByEs(query, query_tags, logger)

    # Call the LLM in streaming mode and relay its output as an event stream
    return StreamingResponse(
        callLLM(request, query, search_results, logger, True),
        media_type="text/event-stream"
    )
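
StreamingResponse consumes the iterator returned by callLLM(..., True) and flushes each yielded chunk to the client as it is produced, which is what makes the text/event-stream response incremental; FastAPI accepts both sync and async generators here. callLLM itself is not part of this excerpt, so the following is only a minimal sketch of its streaming path: the function name, the OpenAI-compatible client, the model name, and the SSE framing are all assumptions, not the service's real code.

from openai import OpenAI

client = OpenAI()  # assumption: an OpenAI-compatible backend

def call_llm_stream_sketch(prompt: str):
    # Hypothetical stand-in for callLLM's streaming branch.
    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content or ""
        if delta:
            # SSE frames each message as a "data: ..." line plus a blank line.
            yield f"data: {delta}\n\n"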

# The existing non-streaming handler, for comparison with rag_stream above:
async def rag(request: fastapi.Request):
    data = await request.json()
    query = data.get('query', '')
    query_tags = data.get('tags', [])

    # Run the same hybrid search against Elasticsearch
    search_results = queryByEs(query, query_tags, logger)

    # Call the LLM once, blocking until the complete answer is available
    markdown_content = callLLM(request, query, search_results, logger, False)

    # If the model produced a valid answer, return it as Markdown
    if markdown_content:
        return {"data": markdown_content, "format": "markdown"}

    return {"data": "No relevant information was found in the knowledge base, so this question cannot be answered."}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
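
With the service listening on port 8000, the difference between the two endpoints is easy to observe from a client. A minimal check using requests follows; note that the /api/rag path for the non-streaming handler is an assumption, since only its body appears in this excerpt:

import requests

payload = {"query": "...", "tags": []}  # request shape taken from the handlers above

# Non-streaming endpoint: one JSON document with the complete Markdown answer.
# (The /api/rag path is assumed; the decorator is not shown in this excerpt.)
print(requests.post("http://localhost:8000/api/rag", json=payload).json())

# Streaming endpoint: lines arrive incrementally as the model generates them.
with requests.post("http://localhost:8000/api/rag_stream", json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line:
            print(line)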