@@ -1,4 +1,3 @@
import asyncio
import json
import logging
import os
@@ -15,10 +14,10 @@ import uvicorn
from contextlib import asynccontextmanager  # needed for the lifespan handler below

from fastapi import FastAPI, HTTPException
from openai import AsyncOpenAI
from sse_starlette import EventSourceResponse
from starlette.responses import StreamingResponse
from starlette.staticfiles import StaticFiles

from Config import Config
from Util.ALiYunUtil import ALiYunUtil
from Util.SearchUtil import *  # presumably provides queryByEs and callLLM used below

# Initialize logging
@@ -45,9 +44,6 @@ logger.addHandler(console_handler)

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Initialize the Aliyun LLM helper
    app.state.aliyun_util = ALiYunUtil()
    yield  # lifespan handlers are async context managers; yield hands control back to FastAPI
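
# The FastAPI() app construction sits outside the hunks shown; with this pattern
# the handler is presumably registered as app = FastAPI(lifespan=lifespan).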

# Suppress HTTPS-related warnings (message patterns are matched as regexes)
warnings.filterwarnings('ignore', message='Connecting to .* using TLS with verify_certs=False is insecure')
warnings.filterwarnings('ignore', message='Unverified HTTPS request is being made to host')
@@ -115,29 +111,8 @@ async def rag(request: fastapi.Request):
    data = await request.json()
    query = data.get('query', '')
    query_tags = data.get('tags', [])

    # Run a hybrid search against Elasticsearch
    search_results = queryByEs(query, query_tags, logger)

    # Call the LLM over the search results
    markdown_content = callLLM(request, query, search_results, logger, False)

    # If the model produced a usable answer
    if markdown_content:
        return {"data": markdown_content, "format": "markdown"}

    return {"data": "没有在知识库中找到相关的信息,无法回答此问题。"}
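
# Example request against this endpoint (sketch; host/port assume a default
# local uvicorn run and are not taken from this diff):
#   curl -X POST http://localhost:8000/api/rag \
#        -H "Content-Type: application/json" \
#        -d '{"query": "your question", "tags": []}'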


@app.post("/api/rag_stream", response_model=None)
async def rag_stream(request: fastapi.Request):
    data = await request.json()
    query = data.get('query', '')
    query_tags = data.get('tags', [])

    # Run a hybrid search against Elasticsearch
    search_results = queryByEs(query, query_tags, logger)

    # Build the prompt context from the search hits ("结果" = "Result")
    context = "\n".join([
        f"结果{i + 1}: {res['tags']['full_content']}"
@@ -172,7 +147,6 @@ async def rag_stream(request: fastapi.Request):
        api_key=Config.MODEL_API_KEY,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
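    # base_url points at DashScope's OpenAI-compatible mode, so the standard
    # openai SDK can stream from the configured model (presumably a Qwen-family
    # model named by Config.MODEL_NAME).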

    async def generate_response_stream():
        try:
            # Call the LLM in streaming mode
@@ -184,41 +158,6 @@ async def rag_stream(request: fastapi.Request):
                max_tokens=8000,
                stream=True  # enable streaming mode
            )

            # Relay the model's reply chunk by chunk
            async for chunk in stream:
                if chunk.choices[0].delta.content:
                    # EventSourceResponse adds the SSE "data:" framing itself,
                    # so yield only the JSON payload to avoid a doubled
                    # "data: data: ..." prefix on the wire.
                    yield json.dumps({'reply': chunk.choices[0].delta.content}, ensure_ascii=False)

        except Exception as e:
            yield json.dumps({'error': str(e)})

    return EventSourceResponse(generate_response_stream())
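
# Example of watching the stream (sketch; host/port assume a default local
# uvicorn run):
#   curl -N -X POST http://localhost:8000/api/rag_stream \
#        -H "Content-Type: application/json" \
#        -d '{"query": "your question", "tags": []}'
# Each SSE frame carries one JSON object with a 'reply' or 'error' key.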


# Chat with the user
@app.post("/api/helloWorld")
async def reply():
    # Initialize the async OpenAI client
    client = AsyncOpenAI(
        api_key=Config.MODEL_API_KEY,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    async def generate_response_stream():
        try:
            # Call the LLM in streaming mode
            stream = await client.chat.completions.create(
                model=Config.MODEL_NAME,
                # System prompt (zh): "You are a good friend of the person you
                # chat with; insightful and knowledgeable; avoid interjections
                # like 哎呀; keep replies under 150 characters." The user turn
                # is a hardcoded "Who are you?" for this hello-world demo.
                messages=[
                    {"role": "system",
                     "content": "你是聊天人的好朋友,你认识深刻,知识渊博,不要使用哎呀这样的语气词。聊天的回复内容不要超过150字。"},
                    {"role": "user", "content": "你是谁?"}
                ],
                max_tokens=4000,
                stream=True  # enable streaming mode
            )

            # Relay the model's reply chunk by chunk
            async for chunk in stream:
                if chunk.choices[0].delta.content: