diff --git a/dsRagAnything/Config/Config.py b/dsRagAnything/Config/Config.py
index d702f5e7..733a4c7b 100644
--- a/dsRagAnything/Config/Config.py
+++ b/dsRagAnything/Config/Config.py
@@ -6,15 +6,15 @@ EMBED_DIM = 1024
 EMBED_MAX_TOKEN_SIZE = 8192
 
 # LLM [DeepSeek official]
-#LLM_API_KEY = "sk-44ae895eeb614aa1a9c6460579e322f1"
-#LLM_BASE_URL = "https://api.deepseek.com"
-#LLM_MODEL_NAME = "deepseek-chat"
+LLM_API_KEY = "sk-44ae895eeb614aa1a9c6460579e322f1"
+LLM_BASE_URL = "https://api.deepseek.com"
+LLM_MODEL_NAME = "deepseek-chat"
 
 # LLM service provided by Alibaba Cloud
-LLM_API_KEY="sk-f6da0c787eff4b0389e4ad03a35a911f"
-LLM_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+#LLM_API_KEY="sk-f6da0c787eff4b0389e4ad03a35a911f"
+#LLM_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
 # LLM_MODEL_NAME = "qwen-plus"  # Do not use Tongyi Qianwen (qwen); it produces incorrect chemical equations!
-LLM_MODEL_NAME = "deepseek-v3"
+#LLM_MODEL_NAME = "deepseek-v3"
 
 # Vision model
 VISION_API_KEY = "sk-pbqibyjwhrgmnlsmdygplahextfaclgnedetybccknxojlyl"
diff --git a/dsRagAnything/Config/__pycache__/Config.cpython-310.pyc b/dsRagAnything/Config/__pycache__/Config.cpython-310.pyc
index b8d1319b..1fe5143e 100644
Binary files a/dsRagAnything/Config/__pycache__/Config.cpython-310.pyc and b/dsRagAnything/Config/__pycache__/Config.cpython-310.pyc differ
diff --git a/dsRagAnything/Start.py b/dsRagAnything/Start.py
index daceacd6..2d14d632 100644
--- a/dsRagAnything/Start.py
+++ b/dsRagAnything/Start.py
@@ -1,7 +1,5 @@
 import json
 import logging
-import os
-from logging.handlers import RotatingFileHandler
 
 import fastapi
 import uvicorn
@@ -14,9 +12,6 @@ from starlette.staticfiles import StaticFiles
 
 from Util.RagUtil import create_llm_model_func, create_vision_model_func, create_embedding_func
 
-
-import logging
-
 # Add the following logging configuration at program startup
 logging.basicConfig(
     level=logging.INFO,  # Set the log level to INFO
@@ -82,12 +77,12 @@ async def rag(request: fastapi.Request):
         )
         # Directly use the rag instance already initialized on app.state
         resp = await app.state.rag.aquery(
-            user_prompt=user_prompt,
+            #user_prompt=user_prompt,
             query=query,
             mode="hybrid",
             stream=True
         )
-
+        print(resp)
         async for chunk in resp:
             if not chunk:
                 continue
diff --git a/dsRagAnything/Topic/Chinese/kv_store_llm_response_cache.json b/dsRagAnything/Topic/Chinese/kv_store_llm_response_cache.json
index 1333a759..141c470c 100644
--- a/dsRagAnything/Topic/Chinese/kv_store_llm_response_cache.json
+++ b/dsRagAnything/Topic/Chinese/kv_store_llm_response_cache.json
@@ -2351,6 +2351,16 @@
       "embedding_min": null,
       "embedding_max": null,
       "original_prompt": "苏轼的家人都有谁?\n 1、不要输出参考资料 或者 References !\n 2、如果问题与提供的知识库内容不符,则明确告诉未在知识库范围内提到!"
+    },
+    "9b37cd625bfa223f5c83501990ad6be1": {
+      "return": "{\"high_level_keywords\": [\"\\u82cf\\u8f7c\", \"\\u5bb6\\u4eba\", \"\\u5bb6\\u5ead\\u5173\\u7cfb\"], \"low_level_keywords\": [\"\\u7236\\u4eb2\", \"\\u6bcd\\u4eb2\", \"\\u5144\\u5f1f\", \"\\u59d0\\u59b9\", \"\\u59bb\\u5b50\", \"\\u5b50\\u5973\", \"\\u82cf\\u6d35\", \"\\u82cf\\u8f99\"]}",
+      "cache_type": "keywords",
+      "chunk_id": null,
+      "embedding": null,
+      "embedding_shape": null,
+      "embedding_min": null,
+      "embedding_max": null,
+      "original_prompt": "苏轼的家人都有谁?"
+    }
   }
 }
\ No newline at end of file