2025-08-15 16:33:05 +08:00
parent c7d7ece6a4
commit 7b149f0f51
5 changed files with 56 additions and 32 deletions

View File

@@ -0,0 +1,6 @@
+# GPT-3.5 model
+MODEL_GPT35 = "gpt-3.5-turbo"
+# GPT-4 model
+MODEL_GPT4 = "gpt-4-0613"
+# Google's text generation model
+MODEL_GEMINI = "gemini-2.5-pro"

View File

@@ -1,3 +1,4 @@
+from Config.GoApiConst import MODEL_GPT35, MODEL_GPT4
 from Util.GoApiUtil import ModelInteractor
 # Example usage
@@ -6,7 +7,7 @@ if __name__ == "__main__":
     interactor = ModelInteractor()
     # Use different models and prompts
-    model_name = "gemini-2.5-pro"
+    model_name = MODEL_GPT4
     prompt_text = "Please introduce yourself in detail, in several paragraphs"
     # Send the streaming request
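
The hunk above ends before the actual call into the interactor, so here is a minimal sketch of how the updated example script presumably finishes after this change. The final call line is an assumption (it is outside the hunk shown); the method name and signature come from stream_request(model, prompt, temperature=0.7) in the ModelInteractor diff below.

# Sketch of the full example script after this commit; the stream_request call is assumed
from Config.GoApiConst import MODEL_GPT4
from Util.GoApiUtil import ModelInteractor

if __name__ == "__main__":
    interactor = ModelInteractor()
    model_name = MODEL_GPT4
    prompt_text = "Please introduce yourself in detail, in several paragraphs"
    # Send the streaming request and print the tokens as they arrive
    interactor.stream_request(model_name, prompt_text, temperature=0.7)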

View File

@@ -1,10 +1,7 @@
 import json
 import requests
 from Config.Config import GPTNB_API_KEY
 class ModelInteractor:
     def __init__(self, api_key=GPTNB_API_KEY, api_url="https://goapi.gptnb.ai/v1/chat/completions"):
         self.api_key = api_key
@@ -13,16 +10,16 @@ class ModelInteractor:
"Content-Type": "application/json", "Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}" "Authorization": f"Bearer {self.api_key}"
} }
def stream_request(self, model, prompt, temperature=0.7): def stream_request(self, model, prompt, temperature=0.7):
""" """
发送流式请求到模型API 发送流式请求到模型API
参数: 参数:
- model: 模型名称 - model: 模型名称
- prompt: 用户提示词 - prompt: 用户提示词
- temperature: 温度参数,控制输出的随机性 - temperature: 温度参数,控制输出的随机性
返回: 返回:
- 无返回值,直接打印流式响应 - 无返回值,直接打印流式响应
""" """
@@ -35,7 +32,7 @@ class ModelInteractor:
"temperature": temperature, "temperature": temperature,
"stream": True "stream": True
} }
try: try:
response = requests.post( response = requests.post(
self.api_url, self.api_url,
@@ -45,34 +42,54 @@ class ModelInteractor:
                 timeout=30
             )
             response.raise_for_status()
             print(f"Streaming response from model {model}: ")
+            buffer = ""
             for chunk in response.iter_content(chunk_size=None):
                 if chunk:
                     chunk_data = chunk.decode('utf-8', errors='replace')
-                    for line in chunk_data.splitlines():
-                        line = line.strip()
-                        if not line:
-                            continue
-                        if line == 'data: [DONE]':
-                            print("\nStreaming response finished")
-                            return
-                        if line.startswith('data: '):
-                            line = line[6:]
-                        try:
-                            data = json.loads(line)
-                            if 'choices' in data and len(data['choices']) > 0:
-                                delta = data['choices'][0].get('delta', {})
-                                content = delta.get('content', '')
-                                if content and content != '\n':
-                                    print(content, end='', flush=True)
-                        except json.JSONDecodeError as e:
-                            print(f"[debug] JSON parse error: {e}, content: {line}")
+                    buffer += chunk_data
+                    # Process every complete JSON object currently in the buffer
+                    while True:
+                        # Locate the start of the next JSON object
+                        start_pos = buffer.find('{')
+                        if start_pos == -1:
+                            break  # no JSON start found
+                        # Try to find the matching closing brace
+                        depth = 1
+                        end_pos = start_pos + 1
+                        while end_pos < len(buffer) and depth > 0:
+                            if buffer[end_pos] == '{':
+                                depth += 1
+                            elif buffer[end_pos] == '}':
+                                depth -= 1
+                            end_pos += 1
+                        if depth == 0:
+                            # Found a complete JSON object
+                            json_str = buffer[start_pos:end_pos]
+                            buffer = buffer[end_pos:]
+                            try:
+                                data = json.loads(json_str)
+                                if 'choices' in data and len(data['choices']) > 0:
+                                    delta = data['choices'][0].get('delta', {})
+                                    content = delta.get('content', '')
+                                    if content and content != '\n':
+                                        print(content, end='', flush=True)
+                            except json.JSONDecodeError as e:
+                                print(f"[debug] JSON parse error: {e}, content: {json_str}")
+                        else:
+                            # No complete JSON object yet; wait for more data
+                            break
+                    # Check whether the stream has finished
+                    if 'data: [DONE]' in buffer:
+                        print("\nStreaming response finished")
+                        return
         except requests.exceptions.RequestException as e:
             print(f"Request error: {e}")