commit
@@ -1,10 +1,15 @@
-# pip install pydantic
+# pip install pydantic requests
 from langchain_core.documents import Document
 from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_openai import OpenAIEmbeddings
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from pydantic import SecretStr  # import SecretStr
-from Config.Config import EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY
+import requests
+import json
+from Config.Config import (
+    EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
+    RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
+)
 
 
 # Simulated long string document content
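
Note: the new import block assumes Config/Config.py exposes six constants. A minimal
sketch of what that module might contain; all values below are illustrative
assumptions, not values taken from this repo:

# Config/Config.py -- hypothetical example values only
EMBED_MODEL_NAME = "BAAI/bge-m3"                          # embedding model name (assumed)
EMBED_BASE_URL = "https://api.siliconflow.cn/v1"          # OpenAI-compatible base URL (assumed)
EMBED_API_KEY = "sk-your-embedding-key"                   # placeholder key
RERANK_MODEL = "BAAI/bge-reranker-v2-m3"                  # rerank model name (assumed)
RERANK_BASE_URL = "https://api.siliconflow.cn/v1/rerank"  # full rerank endpoint; the script posts here directly
RERANK_BINDING_API_KEY = "sk-your-rerank-key"             # placeholder key
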
@@ -43,15 +48,56 @@ embeddings = OpenAIEmbeddings(
 vector_store = InMemoryVectorStore(embeddings)
 ids = vector_store.add_documents(documents=all_splits)
 
-# Vector query
-results = vector_store.similarity_search(
-    "混凝土", k=2
-)
-
-# Print all query results
-print("Result count:", len(results))
-print("Results:")
-for i, result in enumerate(results):
-    print(f"Result {i+1}:")
+# Vector query - fetch more candidates for reranking
+query = "混凝土"  # query term ("concrete")
+results = vector_store.similarity_search(query, k=4)  # fetch 4 candidates for reranking
+
+print("Vector search result count:", len(results))
+
+# Holds the reranked documents and their scores
+reranked_docs_with_scores = []
+
+# Call the rerank model
+if len(results) > 1:
+    # Build the rerank request payload
+    rerank_data = {
+        "model": RERANK_MODEL,
+        "query": query,
+        "documents": [doc.page_content for doc in results],
+        "top_n": len(results)
+    }
+
+    # Call the SiliconFlow API to rerank
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
+    }
+
+    try:
+        response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
+        response.raise_for_status()  # raise if the request failed
+        rerank_result = response.json()
+
+        # Walk the rerank results, keeping each document's score
+        for item in rerank_result.get("results", []):
+            doc_idx = item.get("index", -1)  # -1 makes a missing index fail the bounds check
+            score = item.get("relevance_score", item.get("score", 0.0))  # SiliconFlow names the field "relevance_score"
+            if 0 <= doc_idx < len(results):
+                reranked_docs_with_scores.append((results[doc_idx], score))
+
+        print("Reranked result count:", len(reranked_docs_with_scores))
+    except Exception as e:
+        print(f"Rerank model call failed: {e}")
+        print("Falling back to the original search results")
+        # Use the original results with scores set to 0.0
+        reranked_docs_with_scores = [(doc, 0.0) for doc in results]
+else:
+    # Only one result, no reranking needed
+    reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # single result, score pinned to 1.0
+
+# Print the final results with their relevance scores
+print("Final query results:")
+for i, (result, score) in enumerate(reranked_docs_with_scores):
+    print(f"Result {i+1} (relevance: {score:.4f}):")
     print(result.page_content)
     print("---")
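
Note: the parsing loop reads "results" and each item's "index" and score from the
rerank response. SiliconFlow's rerank endpoint follows the Jina-style rerank schema,
where the score field is named "relevance_score"; the extra fallback to "score" in
the code above is a defensive assumption, not part of the documented API. A sketch
of the response shape the loop expects, with made-up values:

# Hypothetical rerank response body (illustrative values only)
rerank_result = {
    "results": [
        {"index": 2, "relevance_score": 0.9142},
        {"index": 0, "relevance_score": 0.5871},
        {"index": 1, "relevance_score": 0.2210},
        {"index": 3, "relevance_score": 0.0453},
    ]
}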