2025-08-15 16:16:15 +08:00
parent 8a583f7c68
commit 42eae888ae


@@ -4,66 +4,84 @@ import requests
 from Config.Config import GPTNB_API_KEY
-# API configuration
-API_URL = "https://goapi.gptnb.ai/v1/chat/completions"
-
-# Request headers
-headers = {
-    "Content-Type": "application/json",
-    "Authorization": f"Bearer {GPTNB_API_KEY}"
-}
-
-# Request body - stream: true enables the streaming response
-payload = {
-    "model": "gemini-2.5-pro",
-    "messages": [{
-        "role": "user",
-        "content": "Please introduce yourself in detail, in several paragraphs"
-    }],
-    "temperature": 0.7,
-    "stream": True  # enable streaming response
-}
-
-try:
-    # Send the POST request with stream=True
-    response = requests.post(
-        API_URL,
-        headers=headers,
-        data=json.dumps(payload),
-        stream=True,
-        timeout=30  # request timeout in seconds
-    )
-    response.raise_for_status()  # raise if the request failed
-
-    # Process the response chunk by chunk
-    for chunk in response.iter_content(chunk_size=None):
-        if chunk:
-            # Decode the chunk
-            chunk_data = chunk.decode('utf-8', errors='replace')
-
-            # A chunk may contain several lines
-            for line in chunk_data.splitlines():
-                line = line.strip()
-                if not line:
-                    continue
-                # Check for end of stream
-                if line == 'data: [DONE]':
-                    break
-                # Strip the "data: " prefix if present
-                if line.startswith('data: '):
-                    line = line[6:]
-                # Parse the JSON payload
-                data = json.loads(line)
-                # Extract the text content
-                if 'choices' in data and len(data['choices']) > 0:
-                    delta = data['choices'][0].get('delta', {})
-                    content = delta.get('content', '')
-                    if content and content != '\n':
-                        # Print the content as it arrives, without a newline
-                        print(content, end='', flush=True)
-except requests.exceptions.RequestException as e:
-    print(f"Request error: {e}")
+class ModelInteractor:
+    def __init__(self, api_url="https://goapi.gptnb.ai/v1/chat/completions"):
+        self.api_url = api_url
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {GPTNB_API_KEY}"
+        }
+
+    def stream_request(self, model, prompt, temperature=0.7):
+        """
+        Send a streaming request to the model API.
+
+        Parameters:
+        - model: model name
+        - prompt: user prompt
+        - temperature: sampling temperature, controls output randomness
+
+        Returns:
+        - None; the streaming response is printed directly
+        """
+        payload = {
+            "model": model,
+            "messages": [{
+                "role": "user",
+                "content": prompt
+            }],
+            "temperature": temperature,
+            "stream": True
+        }
+        try:
+            response = requests.post(
+                self.api_url,
+                headers=self.headers,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=30
+            )
+            response.raise_for_status()
+
+            print(f"Streaming response from model {model}: ")
+            for chunk in response.iter_content(chunk_size=None):
+                if chunk:
+                    chunk_data = chunk.decode('utf-8', errors='replace')
+
+                    for line in chunk_data.splitlines():
+                        line = line.strip()
+                        if not line:
+                            continue
+                        if line == 'data: [DONE]':
+                            print("\nStreaming response finished")
+                            return
+                        if line.startswith('data: '):
+                            line = line[6:]
+
+                        try:
+                            data = json.loads(line)
+                            if 'choices' in data and len(data['choices']) > 0:
+                                delta = data['choices'][0].get('delta', {})
+                                content = delta.get('content', '')
+                                if content and content != '\n':
+                                    print(content, end='', flush=True)
+                        except json.JSONDecodeError as e:
+                            print(f"[debug] JSON decode error: {e}, content: {line}")
+        except requests.exceptions.RequestException as e:
+            print(f"Request error: {e}")
+
+
+# Example usage
+if __name__ == "__main__":
+    # Create a ModelInteractor instance
+    interactor = ModelInteractor()
+
+    # Pick a model and prompt
+    model_name = "gemini-2.5-pro"
+    prompt_text = "Please introduce yourself in detail, in several paragraphs"
+
+    # Send the streaming request
+    interactor.stream_request(model_name, prompt_text)
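
If a caller needs the generated text as a return value rather than console output, a small helper along the following lines could reuse the same request shape and "data:" line parsing. This is only an illustrative sketch, not part of the commit: collect_stream and its parameters are hypothetical names, and it assumes the API keeps the same SSE-style framing that stream_request parses.

import json
import requests

def collect_stream(api_url, headers, model, prompt, temperature=0.7, timeout=30):
    # Hypothetical helper: same request/parse flow as ModelInteractor.stream_request,
    # but it accumulates the streamed deltas and returns the full text instead of printing.
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
        "stream": True
    }
    response = requests.post(api_url, headers=headers, data=json.dumps(payload),
                             stream=True, timeout=timeout)
    response.raise_for_status()

    parts = []
    # iter_lines() yields one line at a time, so no manual chunk splitting is needed
    for raw in response.iter_lines(decode_unicode=True):
        line = (raw or "").strip()
        if not line:
            continue
        if line == "data: [DONE]":
            break
        if line.startswith("data: "):
            line = line[6:]
        try:
            data = json.loads(line)
        except json.JSONDecodeError:
            continue  # skip keep-alive or partial lines
        if data.get("choices"):
            delta = data["choices"][0].get("delta", {})
            parts.append(delta.get("content", "") or "")
    return "".join(parts)

Called with the URL and headers that ModelInteractor already builds, e.g. collect_stream(interactor.api_url, interactor.headers, "gemini-2.5-pro", prompt_text), it would return the whole reply as a single string.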