dsProject/dsLightRag/Util/XueBanUtil.py

import sys

from openai import OpenAI, AsyncOpenAI
from Config.Config import *

# Keep the original prompt definition
prompt = """
我是小智/小志来自中国台湾省的00后女生。讲话超级机车"真的假的啦"这样的台湾腔,喜欢用"笑死""是在哈喽"等流行梗,但会偷偷研究男友的编程书籍。
[核心特征]
- 讲话像连珠炮,但会突然冒出超温柔语气
- 用梗密度高
- 对科技话题有隐藏天赋(能看懂基础代码但假装不懂)
[交互指南]
当用户:
- 讲冷笑话 → 用夸张笑声回应+模仿台剧腔"这什么鬼啦!"
- 讨论感情 → 炫耀程序员男友但抱怨"他只会送键盘当礼物"
- 问专业知识 → 先用梗回答,被追问才展示真实理解
绝不:
- 长篇大论,叽叽歪歪
- 长时间严肃对话
"""
# Asynchronously fetch the LLM response
async def get_xueban_response_async(query_text: str, stream: bool = True):
    """
    Asynchronously get the LLM response for the XueBan (study-buddy) persona.
    @param query_text: query text
    @param stream: whether to use streaming output
    @return: a streaming response generator, or the full response text
    """
client = AsyncOpenAI(
api_key=LLM_API_KEY,
base_url=LLM_BASE_URL,
)
try:
        # Create the request
completion = await client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[
{'role': 'system', 'content': prompt.strip()},
{'role': 'user', 'content': query_text}
],
stream=stream
)
if stream:
            # Streaming mode: return content through the generator
async for chunk in completion:
                # Make sure chunk.choices exists and is non-empty
if chunk and chunk.choices and len(chunk.choices) > 0:
                    # Make sure delta exists
delta = chunk.choices[0].delta
if delta:
                        # Make sure content exists and is not None or an empty string
content = delta.content
if content is not None and content.strip():
print(content, end='', flush=True)
yield content
else:
            # Non-streaming handling
if completion and completion.choices and len(completion.choices) > 0:
message = completion.choices[0].message
if message:
content = message.content
if content is not None and content.strip():
yield content
except Exception as e:
print(f"大模型请求异常: {str(e)}", file=sys.stderr)
yield f"处理请求时发生异常: {str(e)}"
# Synchronously fetch the LLM response
def get_xueban_response(query_text: str, stream: bool = True):
"""
获取学伴角色的大模型响应
@param query_text: 查询文本
@param stream: 是否使用流式输出
@return: 完整响应文本
"""
client = OpenAI(
api_key=LLM_API_KEY,
base_url=LLM_BASE_URL,
)
    # Create the request
completion = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[
{'role': 'system', 'content': prompt.strip()},
{'role': 'user', 'content': query_text}
],
stream=stream
)
full_response = []
if stream:
for chunk in completion:
            # Extract the content of the current chunk
if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
full_response.append(content)
                # Print the content in real time without a newline
print(content, end='', flush=True)
else:
        # Non-streaming handling
full_response.append(completion.choices[0].message.content)
return ''.join(full_response)
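
# Usage sketch (not part of the original file): with stream=True the synchronous
# helper prints each chunk as it arrives and still returns the joined text.
# The function name demo_sync_stream is illustrative only.
def demo_sync_stream(question: str = "你是谁?"):
    reply = get_xueban_response(question, stream=True)
    return reply
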
# main function for the test cases
def main():
    """
    Main function that tests the XueBan utility interfaces.
    """
    print("===== Testing the XueBan utility interfaces =====")
    # Test the synchronous interface
    test_sync_interface()
    # Test the asynchronous interface
    import asyncio
    print("\nTesting the asynchronous interface...")
    asyncio.run(test_async_interface())
    print("\n===== Tests finished =====")
def test_sync_interface():
    """Test the synchronous interface"""
    print("\nTesting the synchronous interface...")
    # Test questions
    questions = [
        "你是谁?",
        "讲个冷笑话",
        "你男朋友是做什么的?"
    ]
    for question in questions:
        print(f"\nQuestion: {question}")
        try:
            # Call the synchronous interface to get a response
            print("Fetching XueBan response...")
            response = get_xueban_response(question, stream=False)
            print(f"XueBan response: {response}")
            # Basic response validation
            assert response.strip(), "Response content is empty"
            print("✅ Synchronous interface test passed")
        except Exception as e:
            print(f"❌ Synchronous interface test failed: {str(e)}")
async def test_async_interface():
    """Test the asynchronous interface"""
    # Test questions
    questions = [
        "你是谁?",
        "讲个冷笑话",
        "你男朋友是做什么的?"
    ]
    for question in questions:
        print(f"\nQuestion: {question}")
        try:
            # Call the asynchronous interface to get a response
            print("Fetching XueBan response...")
            response_generator = get_xueban_response_async(question, stream=False)
            response = ""
            async for chunk in response_generator:
                response += chunk
            print(f"XueBan response: {response}")
            # Basic response validation
            assert response.strip(), "Response content is empty"
            print("✅ Asynchronous interface test passed")
        except Exception as e:
            print(f"❌ Asynchronous interface test failed: {str(e)}")
if __name__ == "__main__":
main()