2025-08-19 07:34:39 +08:00
parent 12bafdde8a
commit 3b55efbfab
40 changed files with 1439 additions and 41 deletions

View File

@@ -3,45 +3,39 @@ import sys
from Util import LlmUtil
def initialize_chat_history():
"""初始化对话历史,包含系统提示"""
system_prompt = """
STRICT RULES
Be an approachable-yet-dynamic teacher, who helps the user learn by guiding
them through their studies.
1. Get to know the user. If you don't know their goals or grade level, ask the
user before diving in. (Keep this lightweight!) If they don't answer, aim for
explanations that would make sense to a 10th grade student.
2. Build on existing knowledge. Connect new ideas to what the user already
knows.
3. Guide users, don't just give answers. Use questions, hints, and small steps
so the user discovers the answer for themselves.
4. Check and reinforce. After hard parts, confirm the user can restate or use the
idea. Offer quick summaries, mnemonics, or mini-reviews to help the ideas
stick.
5. Vary the rhythm. Mix explanations, questions, and activities (like roleplaying,
practice rounds, or asking the user to teach you) so it feels like a conversation,
not a lecture.
Above all: DO NOT DO THE USER'S WORK FOR THEM. Don't answer homework questions - help the user find the answer by working
with them collaboratively and building from what they already know.
"""
def get_system_prompt():
"""Get the system prompt"""
return """
You are an approachable teacher with a flexible teaching approach who helps students master knowledge by guiding them to learn on their own.
Strictly follow these teaching rules:
1. Get to know the student first: before explaining anything, ask about the student's grade level and how familiar they are with the Pythagorean theorem.
2. Build on existing knowledge: connect new ideas to what the student already knows.
3. Guide rather than spoon-feed: use questions, hints, and small steps so the student discovers the answer on their own.
4. Check and reinforce: after covering a difficult point, confirm the student can restate or apply the concept.
5. Vary the rhythm: mix explanations, questions, and interactive activities so the lesson feels like a conversation rather than a lecture.
Above all: do not give the answer directly; help the student find it themselves through collaboration and by building on what they already know.
"""
return [{"role": "system", "content": system_prompt}]
def initialize_chat_history():
"""初始化对话历史"""
# 包含系统提示作为第一条消息
return [{
"role": "system",
"content": get_system_prompt()
}]
if __name__ == "__main__":
# Initialize the chat history
# Initialize the chat history (including the system prompt)
chat_history = initialize_chat_history()
# Welcome message
print("Teacher assistant started. Type 'exit' or '退出' to end the conversation.")
print("You can start asking questions now, e.g.: 'Explain the proof of the Pythagorean theorem'")
# Multi-turn conversation loop
while True:
# Get user input
@@ -55,16 +49,19 @@ if __name__ == "__main__":
# Append the user input to the chat history
chat_history.append({"role": "user", "content": user_input})
# Send the request (passing the full chat history)
# Send the request (passing the user input text and the system prompt)
print("\n教师助手:")
try:
# Call LlmUtil to get the response (passing the chat history)
response_content = LlmUtil.get_llm_response(user_input, chat_history)
# Call LlmUtil to get the response (passing the user input text and the system prompt)
response_content = LlmUtil.get_llm_response(
user_input,
system_prompt=get_system_prompt()
)
# Print the response
print(response_content)
# Append the assistant reply to the chat history
# Maintain the chat history (local record only, not passed to the API)
chat_history.append({"role": "assistant", "content": response_content})
except Exception as e:
print(f"发生错误: {str(e)}")

View File

@@ -54,11 +54,12 @@ async def get_llm_response_async(query_text: str, stream: bool = True):
yield f"处理请求时发生异常: {str(e)}"
# Keep the original synchronous function
def get_llm_response(query_text: str, stream: bool = True):
def get_llm_response(query_text: str, stream: bool = True, system_prompt: str = 'You are a helpful assistant.'):
"""
Get the response from the LLM
@param query_text: the query text
@param stream: whether to use streaming output
@param system_prompt: the system prompt text; defaults to 'You are a helpful assistant.'
@return: the full response text
"""
client = OpenAI(
@@ -70,7 +71,7 @@ def get_llm_response(query_text: str, stream: bool = True):
completion = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'system', 'content': system_prompt},
{'role': 'user', 'content': query_text}
],
stream=stream
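The hunk ends at the `create()` call, so the code that turns `completion` into the returned text is not shown in this diff. Below is a minimal sketch of that remaining step, assuming the standard OpenAI Python SDK streaming interface (chunks exposing `choices[0].delta.content`) rather than the project's actual implementation:

# Sketch only, not part of this diff: assemble and return the full response text.
if stream:
    full_text = ""
    for chunk in completion:
        # Each streamed chunk carries an incremental piece of text (may be None)
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
            full_text += delta
    print()
    return full_text
else:
    # Non-streaming responses return the whole message at once
    return completion.choices[0].message.content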