From 4258f94fef3dda9df84eda6aaf14cd686c723dbd Mon Sep 17 00:00:00 2001
From: HuangHai <10402852@qq.com>
Date: Sun, 29 Jun 2025 21:21:04 +0800
Subject: [PATCH] 'commit'

---
 dsRag/Start.py                                  | 63 +-----------------
 dsRag/Test/TestQWen3.py                         | 10 ---
 dsRag/Util/ALiYunUtil.py                        | 49 --------------
 dsRag/Util/SearchUtil.py                        | 56 ----------------
 .../__pycache__/ALiYunUtil.cpython-310.pyc      | Bin 1911 -> 0 bytes
 .../__pycache__/SearchUtil.cpython-310.pyc      | Bin 5143 -> 3066 bytes
 6 files changed, 1 insertion(+), 177 deletions(-)
 delete mode 100644 dsRag/Test/TestQWen3.py
 delete mode 100644 dsRag/Util/ALiYunUtil.py
 delete mode 100644 dsRag/Util/__pycache__/ALiYunUtil.cpython-310.pyc

diff --git a/dsRag/Start.py b/dsRag/Start.py
index baa259db..478b45dc 100644
--- a/dsRag/Start.py
+++ b/dsRag/Start.py
@@ -1,4 +1,3 @@
-import asyncio
 import json
 import logging
 import os
@@ -15,10 +14,10 @@ import uvicorn
 from fastapi import FastAPI, HTTPException
 from openai import AsyncOpenAI
 from sse_starlette import EventSourceResponse
+from starlette.responses import StreamingResponse
 from starlette.staticfiles import StaticFiles
 
 from Config import Config
-from Util.ALiYunUtil import ALiYunUtil
 from Util.SearchUtil import *
 
 # Initialize logging
@@ -45,9 +44,6 @@ logger.addHandler(console_handler)
 
 
 async def lifespan(app: FastAPI):
-    # Initialize the Alibaba Cloud LLM helper
-    app.state.aliyun_util = ALiYunUtil()
-
     # Suppress HTTPS-related warnings
     warnings.filterwarnings('ignore', message='Connecting to .* using TLS with verify_certs=False is insecure')
     warnings.filterwarnings('ignore', message='Unverified HTTPS request is being made to host')
@@ -115,29 +111,8 @@ async def rag(request: fastapi.Request):
     data = await request.json()
     query = data.get('query', '')
     query_tags = data.get('tags', [])
-
-    # Hybrid search via Elasticsearch
-    search_results = queryByEs(query, query_tags, logger)
-
-    # Call the LLM
-    markdown_content = callLLM(request, query, search_results, logger, False)
-
-    # If we got a valid answer
-    if markdown_content:
-        return {"data": markdown_content, "format": "markdown"}
-
-    return {"data": "No relevant information was found in the knowledge base, so this question cannot be answered."}
-
-
-@app.post("/api/rag_stream", response_model=None)
-async def rag_stream(request: fastapi.Request):
-    data = await request.json()
-    query = data.get('query', '')
-    query_tags = data.get('tags', [])
-
     # Hybrid search via Elasticsearch
     search_results = queryByEs(query, query_tags, logger)
-
     # Build the prompt
     context = "\n".join([
         f"Result {i + 1}: {res['tags']['full_content']}"
@@ -172,7 +147,6 @@ async def rag_stream(request: fastapi.Request):
         api_key=Config.MODEL_API_KEY,
         base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
     )
-
     async def generate_response_stream():
         try:
             # Stream the LLM call
@@ -184,41 +158,6 @@ async def rag_stream(request: fastapi.Request):
                 max_tokens=8000,
                 stream=True  # Enable streaming mode
             )
-
-            # Stream back the model's reply
-            async for chunk in stream:
-                if chunk.choices[0].delta.content:
-                    yield f"data: {json.dumps({'reply': chunk.choices[0].delta.content}, ensure_ascii=False)}\n\n"
-
-        except Exception as e:
-            yield f"data: {json.dumps({'error': str(e)})}\n\n"
-
-    return EventSourceResponse(generate_response_stream())
-
-
-# Casual chat with the user
-@app.post("/api/helloWorld")
-async def reply():
-    # Initialize the async OpenAI client
-    client = AsyncOpenAI(
-        api_key=Config.MODEL_API_KEY,
-        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
-    )
-
-    async def generate_response_stream():
-        try:
-            # Stream the LLM call
-            stream = await client.chat.completions.create(
-                model=Config.MODEL_NAME,
-                messages=[
-                    {"role": "system",
-                     "content": "You are a good friend of the person chatting with you: insightful and knowledgeable. Do not use interjections like 'aiya'. Keep each reply under 150 characters."},
-                    {"role": "user", "content": "Who are you?"}
-                ],
-                max_tokens=4000,
-                stream=True  # Enable streaming mode
-            )
-
             # Stream back the model's reply
             async for chunk in stream:
                 if chunk.choices[0].delta.content:
diff --git a/dsRag/Test/TestQWen3.py b/dsRag/Test/TestQWen3.py
deleted file mode 100644
index f7e6f5b1..00000000
--- a/dsRag/Test/TestQWen3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from Util.ALiYunUtil import ALiYunUtil
-
-if __name__ == '__main__':
-    ali_util = ALiYunUtil()
-    while True:
-        prompt = input("Enter a question (type q to quit): ")
-        if prompt.lower() == 'q':
-            break
-        answer = ali_util.chat(prompt)
-        print("Answer:", answer)
\ No newline at end of file
diff --git a/dsRag/Util/ALiYunUtil.py b/dsRag/Util/ALiYunUtil.py
deleted file mode 100644
index 0b912280..00000000
--- a/dsRag/Util/ALiYunUtil.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from openai import OpenAI
-from Config.Config import MODEL_API_KEY, MODEL_NAME
-
-class ALiYunUtil:
-    def __init__(self):
-        self.client = OpenAI(
-            api_key=MODEL_API_KEY,
-            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
-        )
-        self.model_name = MODEL_NAME
-
-    def chat(self, prompt, model=None):
-        """
-        Chat with the Alibaba Cloud LLM
-        :param prompt: the user's question
-        :param model: optional; model to use, defaults to MODEL_NAME from Config
-        :return: the model's answer
-        """
-        try:
-            completion = self.client.chat.completions.create(
-                model=model or self.model_name,
-                messages=[
-                    {'role': 'user', 'content': prompt}
-                ]
-            )
-            return completion.choices[0].message.content
-        except Exception as e:
-            return f"An error occurred: {str(e)}"
-
-    def chat_stream(self, prompt, model=None):
-        """
-        Streaming chat with the Alibaba Cloud LLM
-        :param prompt: the user's question
-        :param model: optional; model to use
-        :return: generator yielding the model's streamed response
-        """
-        try:
-            stream = self.client.chat.completions.create(
-                model=model or self.model_name,
-                messages=[
-                    {'role': 'user', 'content': prompt}
-                ],
-                stream=True
-            )
-            for chunk in stream:
-                if chunk.choices[0].delta.content:
-                    yield chunk.choices[0].delta.content
-        except Exception as e:
-            yield f"An error occurred: {str(e)}"
diff --git a/dsRag/Util/SearchUtil.py b/dsRag/Util/SearchUtil.py
index 2052d5bf..1c47d2b2 100644
--- a/dsRag/Util/SearchUtil.py
+++ b/dsRag/Util/SearchUtil.py
@@ -1,5 +1,3 @@
-from starlette.responses import StreamingResponse
-
 from Config.Config import ES_CONFIG
 from Util.EsSearchUtil import EsSearchUtil
 
@@ -124,57 +122,3 @@ def queryByEs(query, query_tags,logger):
     finally:
         es_search_util.es_pool.release_connection(es_conn)
 
-
-def callLLM(request, query, search_results, logger,streamBack=False):
-    # Use the Alibaba Cloud LLM to consolidate the results
-    aliyun_util = request.app.state.aliyun_util
-
-    # Build the prompt
-    context = "\n".join([
-        f"Result {i + 1}: {res['tags']['full_content']}"
-        for i, res in enumerate(search_results['vector_results'] + search_results['text_results'])
-    ])
-
-    # Add image-handling instructions
-    prompt = f"""
-    Information retrieval and answering assistant
-    Based on the following information about '{query}':
-
-    Basic information
-    - Language: Chinese
-    - Description: retrieve information from the supplied material and answer the question
-    - Strengths: extracts key information quickly and accurately, answers clearly and concisely
-
-    Related information
-    {context}
-
-    Answer requirements
-    1. Strictly preserve the original ordering of images relative to the surrounding text, keeping them semantically relevant
-    2. Reference images in Markdown format: ![image description](image path)
-    3. Return Markdown, with appropriate headings, lists and code blocks
-    4. Where the material provides LaTeX formulas, keep them as LaTeX
-    5. Return the Markdown content directly, without extra explanation or commentary
-    6. Answer quickly and accurately from the given material; you may add some extra information, but do not repeat content
-    7. If no relevant information is provided, do not answer
-    8. If the relevant information is a poor match for the original question, do not answer either
-    9. Make sure the content is clearly structured and easy to render on the front end
-    """
-
-    # Call the Alibaba Cloud LLM
-    if len(context) > 0:
-        # Generate the answer with the LLM
-        logger.info("Calling the Alibaba Cloud LLM to generate an answer...")
-
-        if streamBack:
-            # Stream back via SSE
-            async def generate():
-                async for chunk in aliyun_util.chat_stream(prompt):
-                    yield f"data: {chunk}\n\n"
-            # data: An error occurred: object Stream can't be used in 'await' expression
-            return StreamingResponse(generate(), media_type="text/event-stream")
-        else:
-            # Return in one shot
-            markdown_content = aliyun_util.chat(prompt)
-            logger.info("Successfully generated the answer with the Alibaba Cloud LLM!")
-            return markdown_content
-    return None
diff --git a/dsRag/Util/__pycache__/ALiYunUtil.cpython-310.pyc b/dsRag/Util/__pycache__/ALiYunUtil.cpython-310.pyc
deleted file mode 100644
index c56fb91877cd85000f7e2d2cf7d6b62593368821..0000000000000000000000000000000000000000
GIT binary patch
(base85 payload omitted: deleted compiled bytecode, 1911 -> 0 bytes)

diff --git a/dsRag/Util/__pycache__/SearchUtil.cpython-310.pyc b/dsRag/Util/__pycache__/SearchUtil.cpython-310.pyc
GIT binary patch
(base85 payload omitted: compiled bytecode delta, 5143 -> 3066 bytes)
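
Reviewer sketch (not part of the commit): after this patch the merged /api/rag endpoint streams its answer as SSE frames whose JSON payload carries either a "reply" chunk or an "error". Below is a minimal client, assuming the httpx package and a server on http://localhost:8000; only the endpoint path, the {"query", "tags"} request body, and the frame shapes are taken from Start.py above.

import json

import httpx  # assumed client library; any SSE-capable HTTP client works


def ask(query: str, tags: list, base_url: str = "http://localhost:8000") -> str:
    """POST a question to /api/rag and assemble the streamed Markdown reply."""
    parts = []
    with httpx.stream(
        "POST",
        f"{base_url}/api/rag",
        json={"query": query, "tags": tags},
        timeout=None,  # the model may stream for a while
    ) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            # generate_response_stream() yields strings that already start with
            # "data: ", and EventSourceResponse adds its own SSE framing, so
            # strip however many "data: " prefixes appear.
            if not line.startswith("data: "):
                continue
            while line.startswith("data: "):
                line = line[len("data: "):]
            payload = json.loads(line)
            if "reply" in payload:
                parts.append(payload["reply"])
            elif "error" in payload:
                raise RuntimeError(payload["error"])
    return "".join(parts)


if __name__ == "__main__":
    # Hypothetical query and tag values, for illustration only.
    print(ask("What is RAG?", ["demo"]))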