This commit is contained in:
2025-08-19 10:10:26 +08:00
parent 79c6cc992c
commit e6c3be381d
6 changed files with 83 additions and 210 deletions

View File

@@ -13,7 +13,7 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from pydantic import SecretStr
from Config import Config
from typing import List, Tuple, Dict
# Initialize logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@@ -285,72 +285,29 @@ class EsSearchUtil:
query_embedding = embeddings.embed_query(query)
return query_embedding
def search_by_vector(self, query_embedding: list, k: int = 10) -> list:
def rerank_results(self, query: str, results: List[Dict]) -> List[Tuple[Dict, float]]:
"""
Search Elasticsearch by vector
Args:
query_embedding: query vector
k: number of results to return
Returns:
list: search results
"""
# Get a connection from the pool
conn = self.es_pool.get_connection()
try:
# Build the vector query DSL
query = {
"query": {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
"params": {
"query_vector": query_embedding
}
}
}
},
"size": k
}
# Execute the query
response = conn.search(index=self.es_config['index_name'], body=query)
return response['hits']['hits']
except Exception as e:
logger.error(f"Vector query failed: {e}")
print(f"Vector query failed: {e}")
return []
finally:
# Release the connection back to the pool
self.es_pool.release_connection(conn)
def rerank_results(self, query: str, results: list) -> list:
"""
Rank the results with the rerank model
Rerank the search results
Args:
query: query text
results: initial search results
results: list of search results
Returns:
list: reranked results
list: reranked results, where each element is a (document, score) tuple
"""
if len(results) <= 1:
# Too few results, no reranking needed
return [(result, 1.0) for result in results]
return [(doc, 1.0) for doc in results]
# Prepare the rerank request payload
rerank_data = {
"model": Config.RERANK_MODEL,
"query": query,
"documents": [result['_source']['user_input'] for result in results],
"documents": [doc['_source']['user_input'] for doc in results],
"top_n": len(results)
}
# Call the rerank API
# Call the API to perform reranking
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {Config.RERANK_BINDING_API_KEY}"
@@ -361,45 +318,78 @@ class EsSearchUtil:
response.raise_for_status()
rerank_result = response.json()
# Check the response structure
if 'results' not in rerank_result:
logger.error(f"Unexpected rerank API response, missing 'results' field: {rerank_result}")
print("Unexpected rerank API response, missing 'results' field")
return [(result, 1.0) for result in results]
# Process the rerank results
reranked_docs_with_scores = []
if "results" in rerank_result:
for item in rerank_result["results"]:
# Try to read the index and relevance_score fields
doc_idx = item.get("index")
score = item.get("relevance_score", 0.0)
# If missing, fall back to the document and score fields
if doc_idx is None:
doc_idx = item.get("document")
if score == 0.0:
score = item.get("score", 0.0)
# Build the reranked result list
reranked_pairs = []
for item in rerank_result['results']:
# Try to get the document index, preferring the 'index' field over 'document'
doc_idx = item.get('index', item.get('document', -1))
if doc_idx == -1:
logger.error(f"Rerank result item is missing a valid index field: {item}")
print("Rerank result item has an unexpected structure")
continue
# Try to get the score, preferring 'relevance_score' over 'score'
score = item.get('relevance_score', item.get('score', 1.0))
# Check that the index is in range
if 0 <= doc_idx < len(results):
reranked_pairs.append((results[doc_idx], score))
else:
logger.error(f"Document index {doc_idx} is out of range")
print("Document index out of range")
if doc_idx is not None and 0 <= doc_idx < len(results):
reranked_docs_with_scores.append((results[doc_idx], score))
logger.debug(f"Rerank result: doc index={doc_idx}, score={score}")
else:
logger.warning(f"Invalid index in rerank result item: {doc_idx}")
# If there are no valid reranked results, return the original results
if not reranked_pairs:
logger.warning("No valid reranked results, returning original results")
return [(result, 1.0) for result in results]
if not reranked_docs_with_scores:
logger.warning("No valid reranked results obtained, returning original results")
return [(doc, 1.0) for doc in results]
# Sort by score in descending order
reranked_pairs.sort(key=lambda x: x[1], reverse=True)
return reranked_pairs
return reranked_docs_with_scores
except Exception as e:
logger.error(f"Rerank failed: {str(e)}")
print(f"Rerank failed: {e}")
# On rerank failure, return the original results
return [(result, 1.0) for result in results]
return [(doc, 1.0) for doc in results]
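# Sketch of the rerank API response shape the parsing above assumes (illustrative,
# not a documented contract of the service):
#     {"results": [{"index": 2, "relevance_score": 0.91},
#                  {"index": 0, "relevance_score": 0.47}]}
# "index" points back into the "documents" list sent in the request; items whose
# index is missing or out of range are skipped.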
def search_by_vector(self, query_embedding: list, k: int = 10) -> dict:
"""
Search Elasticsearch by vector
Args:
query_embedding: query vector
k: number of results to return
Returns:
dict: search results
"""
# Get a connection from the pool
conn = self.es_pool.get_connection()
try:
# Build the vector search query
query = {
"query": {
"script_score": {
"query": {
"bool": {
"should": [],
"minimum_should_match": 0
}
},
"script": {
"source": "double score = cosineSimilarity(params.query_vector, 'embedding'); return score >= 0 ? score : 0",
"params": {"query_vector": query_embedding}
}
}
},
"size": k
}
# Execute the query
response = conn.search(index=self.es_config['index_name'], body=query)
return response
except Exception as e:
logger.error(f"Vector search failed: {str(e)}")
raise
finally:
# Release the connection back to the pool
self.es_pool.release_connection(conn)
def display_results(self, results: list, show_score: bool = True) -> None:
"""

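A minimal usage sketch of the reworked search-then-rerank flow in EsSearchUtil (the constructor arguments and the embedding helper name are assumptions, not shown in this diff):

# Hypothetical usage; adapt the constructor and embedding call to the actual class.
util = EsSearchUtil(es_config)                    # assumed constructor
query = "how to reset a password"
query_embedding = util.text_to_embedding(query)   # assumed helper wrapping embeddings.embed_query
response = util.search_by_vector(query_embedding, k=10)
hits = response['hits']['hits']                   # search_by_vector now returns the full ES response
reranked = util.rerank_results(query, hits)       # list of (hit, score) tuples
util.display_results([hit for hit, _ in reranked])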
View File

@@ -0,0 +1,115 @@
# pip install pydantic requests
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import SecretStr
import requests
import json
from Config.Config import (
EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
)
def text_to_vector_db(text: str, chunk_size: int = 200, chunk_overlap: int = 0) -> tuple:
"""
Store text in a vector database
Args:
text: the text to ingest
chunk_size: chunk size for the text splitter
chunk_overlap: overlap between adjacent chunks
Returns:
tuple: (vector store object, document count, number of chunks after splitting)
"""
# Create document objects
docs = [Document(page_content=text, metadata={"source": "simulated_document"})]
doc_count = len(docs)
print(f"Number of documents: {doc_count}")
# Split the documents
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
)
all_splits = text_splitter.split_documents(docs)
split_count = len(all_splits)
print(f"Number of chunks after splitting: {split_count}")
# Embedding model
embeddings = OpenAIEmbeddings(
model=EMBED_MODEL_NAME,
base_url=EMBED_BASE_URL,
api_key=SecretStr(EMBED_API_KEY)  # wrap as a SecretStr
)
# Vector store
vector_store = InMemoryVectorStore(embeddings)
ids = vector_store.add_documents(documents=all_splits)
return vector_store, doc_count, split_count
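# Standalone usage sketch for text_to_vector_db (the sample string is illustrative):
#     store, n_docs, n_chunks = text_to_vector_db("A long passage about search engines...", chunk_size=100)
#     # n_docs is 1 (a single source Document); n_chunks grows with text length relative to chunk_size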
def query_vector_db(vector_store: InMemoryVectorStore, query: str, k: int = 4) -> list:
"""
Query text from the vector database
Args:
vector_store: vector store object
query: query string
k: number of results to return
Returns:
list: reranked results, where each element is a (document object, confidence score) tuple
"""
# Vector search - retrieve more results for reranking
results = vector_store.similarity_search(query, k=k)
print(f"Number of vector search results: {len(results)}")
# Store the reranked documents and scores
reranked_docs_with_scores = []
# Call the rerank model
if len(results) > 1:
# Prepare the rerank request payload
rerank_data = {
"model": RERANK_MODEL,
"query": query,
"documents": [doc.page_content for doc in results],
"top_n": len(results)
}
# Call the SiliconFlow API to rerank
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
}
try:
response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
response.raise_for_status()  # check that the request succeeded
rerank_result = response.json()
# Process the rerank results and extract relevance_score
if "results" in rerank_result:
for item in rerank_result["results"]:
doc_idx = item.get("index")
score = item.get("relevance_score", 0.0)
if 0 <= doc_idx < len(results):
reranked_docs_with_scores.append((results[doc_idx], score))
else:
print("Warning: unrecognized rerank API response format")
reranked_docs_with_scores = [(doc, 0.0) for doc in results]
print(f"Number of results after reranking: {len(reranked_docs_with_scores)}")
except Exception as e:
print(f"Rerank model call failed: {e}")
print("Falling back to the original search results")
reranked_docs_with_scores = [(doc, 0.0) for doc in results]
else:
# Only one result, no reranking needed
reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # a single result gets confidence 1.0
return reranked_docs_with_scores
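# Example end-to-end usage (a sketch: the sample text and query are illustrative,
# and RERANK_BASE_URL is assumed to accept the payload built in query_vector_db):
if __name__ == "__main__":
    sample_text = (
        "Elasticsearch is a distributed search engine. "
        "It supports full-text search as well as vector similarity search. "
        "Rerank models can reorder retrieved passages by relevance to the query."
    )
    store, doc_count, split_count = text_to_vector_db(sample_text, chunk_size=100)
    for doc, score in query_vector_db(store, "vector similarity search", k=3):
        print(f"score={score:.4f}  content={doc.page_content[:80]}")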