diff --git a/dsLightRag/Config/GoApiConst.py b/dsLightRag/Config/GoApiConst.py
new file mode 100644
index 00000000..4dde1049
--- /dev/null
+++ b/dsLightRag/Config/GoApiConst.py
@@ -0,0 +1,6 @@
+# GPT-3.5 model
+MODEL_GPT35 = "gpt-3.5-turbo"
+# GPT-4 model
+MODEL_GPT4 = "gpt-4-0613"
+# Google's text-generation model
+MODEL_GEMINI = "gemini-2.5-pro"
diff --git a/dsLightRag/Config/__pycache__/GoApiConst.cpython-310.pyc b/dsLightRag/Config/__pycache__/GoApiConst.cpython-310.pyc
new file mode 100644
index 00000000..331866d6
Binary files /dev/null and b/dsLightRag/Config/__pycache__/GoApiConst.cpython-310.pyc differ
diff --git a/dsLightRag/Test/G1_Gemini.py b/dsLightRag/Test/G1_Gemini.py
index a69f587f..3ed52885 100644
--- a/dsLightRag/Test/G1_Gemini.py
+++ b/dsLightRag/Test/G1_Gemini.py
@@ -1,3 +1,4 @@
+from Config.GoApiConst import MODEL_GPT35, MODEL_GPT4
 from Util.GoApiUtil import ModelInteractor
 
 # Example usage
@@ -6,7 +7,7 @@ if __name__ == "__main__":
     interactor = ModelInteractor()
 
     # Use a different model and prompt
-    model_name = "gemini-2.5-pro"
+    model_name = MODEL_GPT4
     prompt_text = "Please introduce yourself in detail, in several paragraphs"
 
     # Send the streaming request
diff --git a/dsLightRag/Util/GoApiUtil.py b/dsLightRag/Util/GoApiUtil.py
index eb99ba37..63bee6bc 100644
--- a/dsLightRag/Util/GoApiUtil.py
+++ b/dsLightRag/Util/GoApiUtil.py
@@ -1,10 +1,7 @@
 import json
-
 import requests
-
 from Config.Config import GPTNB_API_KEY
-
 
 class ModelInteractor:
     def __init__(self, api_key=GPTNB_API_KEY, api_url="https://goapi.gptnb.ai/v1/chat/completions"):
         self.api_key = api_key
@@ -13,16 +10,16 @@ class ModelInteractor:
             "Content-Type": "application/json",
             "Authorization": f"Bearer {self.api_key}"
         }
-
+
     def stream_request(self, model, prompt, temperature=0.7):
         """
         Send a streaming request to the model API
-
+
         Args:
         - model: model name
         - prompt: user prompt
         - temperature: temperature parameter that controls output randomness
-
+
         Returns:
         - None; prints the streaming response directly
         """
@@ -35,7 +32,7 @@
             "temperature": temperature,
             "stream": True
         }
-
+
         try:
             response = requests.post(
                 self.api_url,
@@ -45,34 +42,54 @@ class ModelInteractor:
                 timeout=30
             )
             response.raise_for_status()
-
+
             print(f"Streaming response from model {model}: ")
+            buffer = ""
             for chunk in response.iter_content(chunk_size=None):
                 if chunk:
                     chunk_data = chunk.decode('utf-8', errors='replace')
-
-                    for line in chunk_data.splitlines():
-                        line = line.strip()
-                        if not line:
-                            continue
-
-                        if line == 'data: [DONE]':
-                            print("\nStreaming response finished")
-                            return
-
-                        if line.startswith('data: '):
-                            line = line[6:]
-
-                        try:
-                            data = json.loads(line)
-                            if 'choices' in data and len(data['choices']) > 0:
-                                delta = data['choices'][0].get('delta', {})
-                                content = delta.get('content', '')
-                                if content and content != '\n':
-                                    print(content, end='', flush=True)
-                        except json.JSONDecodeError as e:
-                            print(f"[debug] JSON parse error: {e}, content: {line}")
-
+                    buffer += chunk_data
+
+                    # Process every complete JSON object currently in the buffer
+                    while True:
+                        # Find where the next JSON object starts
+                        start_pos = buffer.find('{')
+                        if start_pos == -1:
+                            break  # no JSON object start found
+
+                        # Try to find the matching closing brace
+                        depth = 1
+                        end_pos = start_pos + 1
+                        while end_pos < len(buffer) and depth > 0:
+                            if buffer[end_pos] == '{':
+                                depth += 1
+                            elif buffer[end_pos] == '}':
+                                depth -= 1
+                            end_pos += 1
+
+                        if depth == 0:
+                            # Found a complete JSON object
+                            json_str = buffer[start_pos:end_pos]
+                            buffer = buffer[end_pos:]
+
+                            try:
+                                data = json.loads(json_str)
+                                if 'choices' in data and len(data['choices']) > 0:
+                                    delta = data['choices'][0].get('delta', {})
+                                    content = delta.get('content', '')
+                                    if content and content != '\n':
+                                        print(content, end='', flush=True)
+                            except json.JSONDecodeError as e:
+                                print(f"[debug] JSON parse error: {e}, content: {json_str}")
+                        else:
+                            # No complete JSON object yet
+                            break
+
+                    # Check whether the stream has ended
+                    if 'data: [DONE]' in buffer:
+                        print("\nStreaming response finished")
+                        return
+
 
         except requests.exceptions.RequestException as e:
             print(f"Request error: {e}")
diff --git a/dsLightRag/Util/__pycache__/GoApiUtil.cpython-310.pyc b/dsLightRag/Util/__pycache__/GoApiUtil.cpython-310.pyc
index 54f36ab9..f05247a5 100644
Binary files a/dsLightRag/Util/__pycache__/GoApiUtil.cpython-310.pyc and b/dsLightRag/Util/__pycache__/GoApiUtil.cpython-310.pyc differ
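
Note on the new buffered parsing in GoApiUtil.stream_request: the change replaces line-by-line "data: " handling with a buffer plus brace matching, which lets it recover JSON objects that are split across HTTP chunks; the trade-off is that brace counting does not distinguish braces inside JSON string values, so a content delta containing "{" or "}" could confuse the scan. The standalone sketch below mirrors the same logic outside the class so it can be exercised without calling the API; extract_stream_content and the sample chunks are illustrative names and data, not part of this change.

import json

def extract_stream_content(chunks):
    """Standalone version of the buffered brace-matching loop in
    stream_request: accumulate raw chunks, cut out each complete
    top-level JSON object, and yield its delta content."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        while True:
            start_pos = buffer.find('{')
            if start_pos == -1:
                break  # no JSON object starts in the buffer
            depth = 1
            end_pos = start_pos + 1
            while end_pos < len(buffer) and depth > 0:
                if buffer[end_pos] == '{':
                    depth += 1
                elif buffer[end_pos] == '}':
                    depth -= 1
                end_pos += 1
            if depth != 0:
                break  # object still incomplete, wait for the next chunk
            json_str = buffer[start_pos:end_pos]
            buffer = buffer[end_pos:]
            try:
                data = json.loads(json_str)
            except json.JSONDecodeError:
                continue  # skip fragments that are not valid JSON
            choices = data.get('choices') or []
            if choices:
                content = choices[0].get('delta', {}).get('content', '')
                if content:
                    yield content
        if 'data: [DONE]' in buffer:
            return  # end-of-stream sentinel seen

if __name__ == "__main__":
    # One delta object is deliberately split across the two chunks.
    chunks = [
        'data: {"choices": [{"delta": {"content": "Hel"}}]}\n\n'
        'data: {"choices": [{"delta": {"con',
        'tent": "lo"}}]}\n\ndata: [DONE]\n\n',
    ]
    assert ''.join(extract_stream_content(chunks)) == "Hello"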