@@ -4,13 +4,13 @@ from lightrag.utils import EmbeddingFunc
 from Config.Config import *
 
 
-def create_llm_model_func(v_history_messages):
-    def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
+def create_llm_model_func(history_messages):
+    def llm_model_func(prompt, system_prompt=None, **kwargs):
         return openai_complete_if_cache(
             LLM_MODEL_NAME,
             prompt,
             system_prompt=system_prompt,
-            history_messages=v_history_messages,
+            history_messages=history_messages,
             api_key=LLM_API_KEY,
             base_url=LLM_BASE_URL,
             **kwargs,
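
For context, a minimal usage sketch (not part of the diff; the LightRAG wiring, the working_dir path, and the sample history below are assumptions for illustration): after this change the factory captures history_messages in a closure instead of shadowing it with an inner parameter, and the captured value is what gets forwarded to openai_complete_if_cache.

from lightrag import LightRAG

# Hypothetical conversation history; in the real project this would come
# from whoever calls create_llm_model_func.
history = [
    {"role": "user", "content": "previous question"},
    {"role": "assistant", "content": "previous answer"},
]

# Build the LLM callable with the history baked into the closure.
llm_model_func = create_llm_model_func(history)

# Assumed wiring into LightRAG; working_dir is a placeholder and the
# embedding_func depends on the project's Config, so it is omitted here.
rag = LightRAG(
    working_dir="./rag_storage",
    llm_model_func=llm_model_func,
)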