main
HuangHai 2 weeks ago
parent e78cfe513e
commit a6a5e94593

@@ -62,7 +62,7 @@ async def rag(request: fastapi.Request):
try:
# 初始化RAG组件
llm_model_func = create_llm_model_func(v_history_messages=[])
llm_model_func = create_llm_model_func(history_messages=[])
vision_model_func = create_vision_model_func(llm_model_func)
embedding_func = create_embedding_func()

@@ -45,7 +45,7 @@ async def main():
enable_equation_processing=True, # 处理公式
)
# 自定义的大模型函数
llm_model_func = create_llm_model_func(v_history_messages=[])
llm_model_func = create_llm_model_func(history_messages=[])
# 自定义的可视模型函数
vision_model_func = create_vision_model_func(llm_model_func)
# 自定义的嵌入函数

@@ -12,7 +12,7 @@ async def load_existing_lightrag():
WORKING_DIR = "./Topic/DongHua"
# 创建 LLM 模型自定义函数
llm_model_func = create_llm_model_func(v_history_messages=[])
llm_model_func = create_llm_model_func(history_messages=[])
# 创建可视模型自定义函数
vision_model_func = create_vision_model_func(llm_model_func)
# 创建嵌入模型自定义函数

@@ -4,13 +4,13 @@ from lightrag.utils import EmbeddingFunc
from Config.Config import *
def create_llm_model_func(v_history_messages):
def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
def create_llm_model_func(history_messages):
def llm_model_func(prompt, system_prompt=None, **kwargs):
return openai_complete_if_cache(
LLM_MODEL_NAME,
prompt,
system_prompt=system_prompt,
history_messages=v_history_messages,
history_messages=history_messages,
api_key=LLM_API_KEY,
base_url=LLM_BASE_URL,
**kwargs,

Loading…
Cancel
Save