|
|
|
@ -85,10 +85,20 @@ PROMPT_TEMPLATE = """
|
|
|
|
|
{text_chunk}
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
def ask_llm(text_chunk):
    """Send *text_chunk* to the LLM and return its raw response."""
    # Render the shared prompt template and forward it in one step.
    return call_qwen_plus(PROMPT_TEMPLATE.format(text_chunk=text_chunk))
|
|
|
|
|
def ask_llm(text_chunk, is_final=False):
    """Call the LLM with the rendered prompt and return its response.

    Output is streamed to stdout chunk-by-chunk as it arrives. Returns the
    full response on success, or None when the underlying call raises.
    """

    def _echo(chunk):
        """SSE streaming callback: print each chunk immediately."""
        print(chunk, end='', flush=True)

    # str.format silently ignores placeholders the template does not use,
    # so passing is_final is safe even for templates without that slot.
    rendered = PROMPT_TEMPLATE.format(text_chunk=text_chunk, is_final=is_final)

    try:
        return call_qwen_plus(rendered, stream_callback=_echo)
    except Exception as e:
        # Best-effort boundary: report the failure and signal it with None
        # rather than propagating (callers treat None as "call failed").
        print(f"调用大模型出错: {str(e)}")
        return None
|
|
|
|
|
|
|
|
|
|
def process_document(input_path, output_dir):
|
|
|
|
|
"""处理文档主函数"""
|
|
|
|
|