2025-08-15 16:16:15 +08:00
parent 8a583f7c68
commit 42eae888ae

@@ -4,66 +4,84 @@ import requests
 from Config.Config import GPTNB_API_KEY
 
-# API configuration
-API_URL = "https://goapi.gptnb.ai/v1/chat/completions"
-
-# Request headers
-headers = {
-    "Content-Type": "application/json",
-    "Authorization": f"Bearer {GPTNB_API_KEY}"
-}
-
-# Request body - stream: true enables the streaming response
-payload = {
-    "model": "gemini-2.5-pro",
-    "messages": [{
-        "role": "user",
-        "content": "Please introduce yourself in detail, split into several paragraphs"
-    }],
-    "temperature": 0.7,
-    "stream": True  # enable streaming response
-}
-
-try:
-    # Send the POST request with stream=True
-    response = requests.post(
-        API_URL,
-        headers=headers,
-        data=json.dumps(payload),
-        stream=True,
-        timeout=30  # request timeout in seconds
-    )
-    response.raise_for_status()  # raise if the request failed
-
-    # Process the response chunk by chunk
-    for chunk in response.iter_content(chunk_size=None):
-        if chunk:
-            # Decode the chunk
-            chunk_data = chunk.decode('utf-8', errors='replace')
-
-            # A chunk may contain several response lines
-            for line in chunk_data.splitlines():
-                line = line.strip()
-                if not line:
-                    continue
-
-                # Check for the end-of-stream marker
-                if line == 'data: [DONE]':
-                    break
-
-                # Strip the "data: " prefix if present
-                if line.startswith('data: '):
-                    line = line[6:]
-
-                # Parse the JSON chunk
-                data = json.loads(line)
-
-                # Extract the text content
-                if 'choices' in data and len(data['choices']) > 0:
-                    delta = data['choices'][0].get('delta', {})
-                    content = delta.get('content', '')
-                    if content and content != '\n':
-                        # Print the content as it arrives, without a newline
-                        print(content, end='', flush=True)
-except requests.exceptions.RequestException as e:
-    print(f"Request error: {e}")
+
+class ModelInteractor:
+    def __init__(self, api_url="https://goapi.gptnb.ai/v1/chat/completions"):
+        self.api_url = api_url
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {GPTNB_API_KEY}"
+        }
+
+    def stream_request(self, model, prompt, temperature=0.7):
+        """
+        Send a streaming request to the model API.
+
+        Args:
+            model: model name
+            prompt: user prompt
+            temperature: sampling temperature, controls output randomness
+
+        Returns:
+            None; the streamed response is printed directly.
+        """
+        payload = {
+            "model": model,
+            "messages": [{
+                "role": "user",
+                "content": prompt
+            }],
+            "temperature": temperature,
+            "stream": True
+        }
+
+        try:
+            response = requests.post(
+                self.api_url,
+                headers=self.headers,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=30
+            )
+            response.raise_for_status()
+
+            print(f"Streaming response from model {model}: ")
+            for chunk in response.iter_content(chunk_size=None):
+                if chunk:
+                    chunk_data = chunk.decode('utf-8', errors='replace')
+
+                    for line in chunk_data.splitlines():
+                        line = line.strip()
+                        if not line:
+                            continue
+
+                        if line == 'data: [DONE]':
+                            print("\nStream finished")
+                            return
+
+                        if line.startswith('data: '):
+                            line = line[6:]
+
+                        try:
+                            data = json.loads(line)
+                            if 'choices' in data and len(data['choices']) > 0:
+                                delta = data['choices'][0].get('delta', {})
+                                content = delta.get('content', '')
+                                if content and content != '\n':
+                                    print(content, end='', flush=True)
+                        except json.JSONDecodeError as e:
+                            print(f"[debug] JSON decode error: {e}, line: {line}")
+        except requests.exceptions.RequestException as e:
+            print(f"Request error: {e}")
+
+
+# Example usage
+if __name__ == "__main__":
+    # Create a ModelInteractor instance
+    interactor = ModelInteractor()
+
+    # Choose a model and prompt
+    model_name = "gemini-2.5-pro"
+    prompt_text = "Please introduce yourself in detail, split into several paragraphs"
+
+    # Send the streaming request
+    interactor.stream_request(model_name, prompt_text)
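
Note on the streaming format: the parsing loop in stream_request assumes the endpoint emits OpenAI-compatible server-sent events, i.e. lines of the form "data: {...}" whose JSON carries an incremental piece of text in choices[0].delta.content, terminated by a final "data: [DONE]" marker. The snippet below is a minimal, self-contained sketch of that assumption; the sample lines are invented for illustration, and the real chunk layout is whatever the goapi.gptnb.ai endpoint actually returns.

import json

# Hypothetical SSE lines shaped like OpenAI-style streaming chunks
# (assumed format for illustration; not captured from the real API).
sample_lines = [
    'data: {"choices": [{"delta": {"role": "assistant"}}]}',
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"choices": [{"delta": {"content": ", world"}}]}',
    'data: [DONE]',
]

for line in sample_lines:
    line = line.strip()
    if line == 'data: [DONE]':      # end-of-stream marker
        print("\n[stream finished]")
        break
    if line.startswith('data: '):   # strip the SSE prefix
        line = line[6:]
    data = json.loads(line)
    delta = data['choices'][0].get('delta', {})
    content = delta.get('content', '')
    if content:
        print(content, end='', flush=True)   # prints "Hello, world" incrementally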