@@ -13,7 +13,7 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from pydantic import SecretStr
from Config import Config

from typing import List, Tuple, Dict
# Initialize logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@@ -285,72 +285,29 @@ class EsSearchUtil:
        query_embedding = embeddings.embed_query(query)
        return query_embedding

    def search_by_vector(self, query_embedding: list, k: int = 10) -> list:
    def rerank_results(self, query: str, results: List[Dict]) -> List[Tuple[Dict, float]]:
        """
        Search Elasticsearch by vector

        Parameters:
            query_embedding: the query vector
            k: the number of results to return

        Returns:
            list: the search results
        """
        # Get a connection from the connection pool
        conn = self.es_pool.get_connection()

        try:
            # Build the vector query DSL
            query = {
                "query": {
                    "script_score": {
                        "query": {"match_all": {}},
                        "script": {
                            "source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
                            "params": {
                                "query_vector": query_embedding
                            }
                        }
                    }
                },
                "size": k
            }

            # Execute the query
            response = conn.search(index=self.es_config['index_name'], body=query)
            return response['hits']['hits']
        except Exception as e:
            logger.error(f"Vector query failed: {e}")
            print(f"Vector query failed: {e}")
            return []
        finally:
            # Release the connection back to the pool
            self.es_pool.release_connection(conn)

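Both the vector query above and the rerank step below assume documents indexed with a dense_vector field named 'embedding' and a text field named 'user_input' (read later via result['_source']['user_input']). The real mapping is not part of this commit; a minimal, purely illustrative sketch of what it might look like, where the index name and the vector dimension (1536) are assumptions:

    from elasticsearch import Elasticsearch

    # Illustrative only -- the real mapping and vector dimension are not shown in this diff.
    es_client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    es_client.indices.create(
        index="knowledge_base",  # stands in for self.es_config['index_name']
        body={
            "mappings": {
                "properties": {
                    "user_input": {"type": "text"},
                    "embedding": {"type": "dense_vector", "dims": 1536}  # must match the embedding model
                }
            }
        }
    )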
    def rerank_results(self, query: str, results: list) -> list:
        """
        Use the rerank model to order the results
        Rerank the search results

        Parameters:
            query: the query text
            results: the initial search results
            results: the list of search results

        Returns:
            list: the reranked results
            list: the reranked result list; each element is a (document, score) tuple
        """
        if len(results) <= 1:
            # Too few results; no reranking needed
            return [(result, 1.0) for result in results]
            return [(doc, 1.0) for doc in results]

        # Prepare the rerank request data
        rerank_data = {
            "model": Config.RERANK_MODEL,
            "query": query,
            "documents": [result['_source']['user_input'] for result in results],
            "documents": [doc['_source']['user_input'] for doc in results],
            "top_n": len(results)
        }

        # Call the rerank API
        # Call the API to rerank
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {Config.RERANK_BINDING_API_KEY}"
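The HTTP call itself falls in the lines elided between this hunk and the next (the next hunk resumes at response.raise_for_status()). For orientation, a hedged sketch of what the request and the response shape consumed below might look like; the endpoint URL is hypothetical, while the headers and payload mirror the rerank_data built above:

    import requests

    # Hypothetical endpoint -- the real URL / config key is not visible in this diff.
    RERANK_URL = "https://api.example.com/v1/rerank"

    headers = {"Content-Type": "application/json", "Authorization": "Bearer <RERANK_BINDING_API_KEY>"}
    rerank_data = {
        "model": "<RERANK_MODEL>",
        "query": "how do I reset my password",
        "documents": ["doc one", "doc two"],
        "top_n": 2,
    }

    resp = requests.post(RERANK_URL, headers=headers, json=rerank_data, timeout=30)
    resp.raise_for_status()
    # Response shape the code below expects (with a per-item fallback to "document"/"score"):
    # {"results": [{"index": 1, "relevance_score": 0.91}, {"index": 0, "relevance_score": 0.42}]}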
@@ -361,45 +318,78 @@ class EsSearchUtil:
            response.raise_for_status()
            rerank_result = response.json()

            # Check the response structure
            if 'results' not in rerank_result:
                logger.error(f"Rerank API response structure is invalid, missing the 'results' field: {rerank_result}")
                print(f"Rerank API response structure is invalid, missing the 'results' field")
                return [(result, 1.0) for result in results]
            # Process the rerank results
            reranked_docs_with_scores = []
            if "results" in rerank_result:
                for item in rerank_result["results"]:
                    # Try the index and relevance_score fields first
                    doc_idx = item.get("index")
                    score = item.get("relevance_score", 0.0)

                    # If not found, fall back to the document and score fields
                    if doc_idx is None:
                        doc_idx = item.get("document")
                    if score == 0.0:
                        score = item.get("score", 0.0)

            # Build the reranked result list
            reranked_pairs = []
            for item in rerank_result['results']:
                # Try to get the document index, preferring the 'index' field, then the 'document' field
                doc_idx = item.get('index', item.get('document', -1))
                if doc_idx == -1:
                    logger.error(f"Rerank result item is missing a valid index field: {item}")
                    print(f"Rerank result item structure is invalid")
                    continue

                # Try to get the score, preferring the 'relevance_score' field, then the 'score' field
                score = item.get('relevance_score', item.get('score', 1.0))

                # Check whether the index is valid
                if 0 <= doc_idx < len(results):
                    reranked_pairs.append((results[doc_idx], score))
                else:
                    logger.error(f"Document index {doc_idx} is out of range")
                    print(f"Document index is out of range")
                    if doc_idx is not None and 0 <= doc_idx < len(results):
                        reranked_docs_with_scores.append((results[doc_idx], score))
                        logger.debug(f"Rerank result: document index={doc_idx}, score={score}")
                    else:
                        logger.warning(f"Rerank result item has an invalid index: {doc_idx}")

            # If there are no valid reranked results, return the original results
            if not reranked_pairs:
                logger.warning("No valid reranked results; returning the original results")
                return [(result, 1.0) for result in results]
            if not reranked_docs_with_scores:
                logger.warning("No valid reranked results obtained; returning the original results")
                return [(doc, 1.0) for doc in results]

            # Sort by score in descending order
            reranked_pairs.sort(key=lambda x: x[1], reverse=True)
            return reranked_pairs
            return reranked_docs_with_scores
        except Exception as e:
            logger.error(f"Reranking failed: {str(e)}")
            print(f"Reranking failed: {e}")
            # Return the original results when reranking fails
            return [(result, 1.0) for result in results]
            return [(doc, 1.0) for doc in results]
    def search_by_vector(self, query_embedding: list, k: int = 10) -> dict:
        """
        Search Elasticsearch by vector

        Parameters:
            query_embedding: the query vector
            k: the number of results to return

        Returns:
            dict: the search results
        """
        # Get a connection from the connection pool
        conn = self.es_pool.get_connection()
        try:
            # Build the vector search query
            query = {
                "query": {
                    "script_score": {
                        "query": {
                            "bool": {
                                "should": [],
                                "minimum_should_match": 0
                            }
                        },
                        "script": {
                            "source": "double score = cosineSimilarity(params.query_vector, 'embedding'); return score >= 0 ? score : 0",
                            "params": {"query_vector": query_embedding}
                        }
                    }
                },
                "size": k
            }

            # Execute the query
            response = conn.search(index=self.es_config['index_name'], body=query)
            return response
        except Exception as e:
            logger.error(f"Vector search failed: {str(e)}")
            raise
        finally:
            # Release the connection back to the pool
            self.es_pool.release_connection(conn)

    def display_results(self, results: list, show_score: bool = True) -> None:
        """
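Taken together, a minimal sketch of how a caller might chain these methods. Only the method signatures above come from this commit; the EsSearchUtil constructor arguments, the index configuration, and the embedding setup are assumptions:

    from langchain_openai import OpenAIEmbeddings

    # Assumed setup -- the constructor and config layout are not shown in this diff.
    es_config = {"index_name": "knowledge_base"}  # hypothetical
    util = EsSearchUtil(es_config)

    query = "how do I reset my password"
    # embed_query returns a list[float]; API key / model selection assumed to come from Config.
    query_vector = OpenAIEmbeddings().embed_query(query)

    response = util.search_by_vector(query_vector, k=10)  # new signature returns the raw ES response dict
    hits = response['hits']['hits']

    reranked = util.rerank_results(query, hits)  # list of (document, score) tuples, best first
    for doc, score in reranked:
        print(score, doc['_source']['user_input'])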