@@ -1,5 +1,5 @@
 # pip install pydantic requests
-from ElasticSearch.Utils.VectorUtil import text_to_vector_db, query_vector_db
+from ElasticSearch.Utils.VectorDBUtil import VectorDBUtil


 def main():
@@ -16,12 +16,15 @@ def main():
 With the development of construction technology, new types of concrete such as high-performance concrete, self-compacting concrete, and recycled-aggregate concrete keep emerging, offering more options for civil engineering."""

+    # Create the tool instance
+    vector_util = VectorDBUtil()
+
     # Call the text ingestion function
-    vector_store, doc_count, split_count = text_to_vector_db(long_text)
+    vector_util.text_to_vector_db(long_text)

     # Call the text query function
     query = "concrete"
-    reranked_results = query_vector_db(vector_store, query, k=4)
+    reranked_results = vector_util.query_vector_db(query, k=4)

     # Print all query results and their confidence scores
     print("Final query results:")
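Note: the hunk above migrates main() from the module-level VectorUtil functions to the new VectorDBUtil class. A minimal sketch of the new calling convention, assuming only what this diff shows (the module path, the method names, and the (Document, score) tuple shape of query results):

```python
# Sketch of the class-based flow used by the updated main() above.
from ElasticSearch.Utils.VectorDBUtil import VectorDBUtil

long_text = "High-performance concrete offers more options for civil engineering."
vector_util = VectorDBUtil()              # owns the embeddings and the in-memory store
vector_util.text_to_vector_db(long_text)  # ingest: split, embed, store
for doc, score in vector_util.query_vector_db("concrete", k=4):
    print(f"{score:.4f}  {doc.page_content[:40]}")  # (Document, confidence score) tuples
```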
@@ -1,5 +1,3 @@
-import warnings
-
 from Config import Config
 from ElasticSearch.Utils.EsSearchUtil import EsSearchUtil

@@ -32,8 +32,7 @@ if __name__ == "__main__":
     print(f"3. First 3 vector dimensions: {query_embedding[:3]}")

     print("4. Running Elasticsearch vector search...")
-    vector_results = search_util.search_by_vector(query_embedding, k=5)
-    vector_hits = vector_results['hits']['hits']
+    vector_hits = search_util.search_by_vector(query_embedding, k=5)
     print(f"5. Vector search hit count: {len(vector_hits)}")

     # Rerank the vector results
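Note: this caller change tracks the new return type of search_by_vector, which now returns the hits list instead of the raw Elasticsearch response dict, so the unwrapping line disappears:

```python
# Before this commit the caller unwrapped the raw response itself:
#     vector_results = search_util.search_by_vector(query_embedding, k=5)
#     vector_hits = vector_results['hits']['hits']
# Now the method returns the hits directly; each hit is a standard ES hit dict:
vector_hits = search_util.search_by_vector(query_embedding, k=5)
for hit in vector_hits:
    print(hit['_score'], hit['_source']['user_input'])
```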
@@ -185,33 +185,33 @@ class EsSearchUtil:
         # 2. Get a connection from the pool
         conn = search_util.es_pool.get_connection()

-        # 3. Check whether the index exists; create it if not
+        # # 3. Check whether the index exists; create it if not
         index_name = Config.ES_CONFIG['index_name']
-        if not conn.indices.exists(index=index_name):
-            # Define the mapping structure
-            mapping = {
-                "mappings": {
-                    "properties": {
-                        "embedding": {
-                            "type": "dense_vector",
-                            "dims": 1024,  # adjust to the actual embedding dimension
-                            "index": True,
-                            "similarity": "l2_norm"
-                        },
-                        "user_input": {"type": "text"},
-                        "tags": {
-                            "type": "object",
-                            "properties": {
-                                "tags": {"type": "keyword"},
-                                "full_content": {"type": "text"}
-                            }
-                        },
-                        "timestamp": {"type": "date"}
-                    }
-                }
-            }
-            conn.indices.create(index=index_name, body=mapping)
-            print(f"Index '{index_name}' created successfully")
+        # if not conn.indices.exists(index=index_name):
+        #     # Define the mapping structure
+        #     mapping = {
+        #         "mappings": {
+        #             "properties": {
+        #                 "embedding": {
+        #                     "type": "dense_vector",
+        #                     "dims": Config.EMBED_DIM,  # adjust to the actual embedding dimension
+        #                     "index": True,
+        #                     "similarity": "l2_norm"
+        #                 },
+        #                 "user_input": {"type": "text"},
+        #                 "tags": {
+        #                     "type": "object",
+        #                     "properties": {
+        #                         "tags": {"type": "keyword"},
+        #                         "full_content": {"type": "text"}
+        #                     }
+        #                 },
+        #                 "timestamp": {"type": "date"}
+        #             }
+        #         }
+        #     }
+        #     conn.indices.create(index=index_name, body=mapping)
+        #     print(f"Index '{index_name}' created successfully")

         # 4. Split the text
         text_chunks = self.split_text_into_chunks(long_text)
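Note: besides commenting the block out, the new copy swaps the hard-coded dims: 1024 for Config.EMBED_DIM. The Config module is not part of this commit; a hypothetical entry consistent with the mapping above would be:

```python
# Hypothetical Config values, for illustration only (not taken from this commit).
EMBED_DIM = 1024                    # must equal the embedding model's output dimension
ES_CONFIG = {
    "index_name": "example_index",  # placeholder name
}
```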
@@ -285,108 +285,128 @@ class EsSearchUtil:
         query_embedding = embeddings.embed_query(query)
         return query_embedding

-    def rerank_results(self, query: str, results: List[Dict]) -> List[Tuple[Dict, float]]:
+    def rerank_results(self, query: str, results: list) -> list:
         """
-        Rerank the search results
+        Rerank the search results with the rerank model

         Args:
             query: the query text
             results: list of search results

         Returns:
-            list: reranked results; each element is a (document, score) tuple
+            list: reranked results; each element is a (document object, score) tuple
         """
-        if len(results) <= 1:
-            return [(doc, 1.0) for doc in results]
+        if not results:
+            print("Warning: no search results to rerank")
+            return []

+        try:
             # Prepare the rerank request payload
+            # Make sure each doc is a dict containing '_source' and 'user_input'
+            documents = []
+            valid_results = []
+            for i, doc in enumerate(results):
+                if isinstance(doc, dict) and '_source' in doc and 'user_input' in doc['_source']:
+                    documents.append(doc['_source']['user_input'])
+                    valid_results.append(doc)
+                else:
+                    print(f"Warning: result item {i} is malformed, skipping it")
+                    print(f"Result item content: {doc}")
+
+            if not documents:
+                print("Warning: no valid documents to rerank")
+                # Return the original results, converted to (result, score) tuples
+                return [(doc, doc.get('_score', 0.0)) for doc in results]
+
             rerank_data = {
                 "model": Config.RERANK_MODEL,
                 "query": query,
-                "documents": [doc['_source']['user_input'] for doc in results],
-                "top_n": len(results)
+                "documents": documents,
+                "top_n": len(documents)
             }

-            # Call the API to rerank
+            # Call the rerank API
             headers = {
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {Config.RERANK_BINDING_API_KEY}"
             }

-        try:
             response = requests.post(Config.RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
-            response.raise_for_status()
+            response.raise_for_status()  # raise if the request failed
             rerank_result = response.json()

             # Process the rerank results
-            reranked_docs_with_scores = []
+            reranked_results = []
             if "results" in rerank_result:
                 for item in rerank_result["results"]:
-                    # Try the index and relevance_score fields first
                     doc_idx = item.get("index")
                     score = item.get("relevance_score", 0.0)
-                    # If missing, fall back to the document and score fields
-                    if doc_idx is None:
-                        doc_idx = item.get("document")
-                    if score == 0.0:
-                        score = item.get("score", 0.0)
-
-                    if doc_idx is not None and 0 <= doc_idx < len(results):
-                        reranked_docs_with_scores.append((results[doc_idx], score))
-                        logger.debug(f"Rerank result: doc index={doc_idx}, score={score}")
-                    else:
-                        logger.warning(f"Invalid index in rerank result item: {doc_idx}")
+                    if 0 <= doc_idx < len(valid_results):
+                        result = valid_results[doc_idx]
+                        reranked_results.append((result, score))
+            else:
+                print("Warning: unrecognized rerank API response format")
+                # Return the original results, converted to (result, score) tuples
+                reranked_results = [(doc, doc.get('_score', 0.0)) for doc in valid_results]

-            # If no valid rerank results were obtained, return the originals
-            if not reranked_docs_with_scores:
-                logger.warning("No valid rerank results obtained; returning the original results")
-                return [(doc, 1.0) for doc in results]
-
-            return reranked_docs_with_scores
+            print(f"Result count after rerank: {len(reranked_results)}")
+            return reranked_results

         except Exception as e:
-            logger.error(f"Rerank failed: {str(e)}")
-            return [(doc, 1.0) for doc in results]
+            print(f"Rerank failed: {e}")
+            print("Falling back to the original search results")
+            # Return the original results, converted to (result, score) tuples
+            return [(doc, doc.get('_score', 0.0)) for doc in results]

-    def search_by_vector(self, query_embedding: list, k: int = 10) -> dict:
+    def search_by_vector(self, query_embedding: list, k: int = 10) -> list:
         """
-        Vector search in Elasticsearch
+        Similarity search by vector

         Args:
             query_embedding: the query vector
             k: number of results to return

         Returns:
-            dict: the raw search response
+            list: list of search hits
         """
+        try:
             # Get a connection from the pool
             conn = self.es_pool.get_connection()
-        try:
+            index_name = Config.ES_CONFIG['index_name']
-            # Build the vector search query
-            query = {
+            # Run the vector search
+            response = conn.search(
+                index=index_name,
+                body={
                     "query": {
                         "script_score": {
-                            "query": {
-                                "bool": {
-                                    "should": [],
-                                    "minimum_should_match": 0
-                                }
-                            },
+                            "query": {"match_all": {}},
                             "script": {
-                                "source": "double score = cosineSimilarity(params.query_vector, 'embedding'); return score >= 0 ? score : 0",
-                                "params": {"query_vector": query_embedding}
+                                "source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
+                                "params": {
+                                    "query_vector": query_embedding
+                                }
                             }
                         }
                     },
                     "size": k
                 }
+            )
+
+            # Extract the results
+            # Make sure we return only the hits.hits section
+            if 'hits' in response and 'hits' in response['hits']:
+                results = response['hits']['hits']
+                print(f"Vector search hit count: {len(results)}")
+                return results
+            else:
+                print("Warning: malformed vector search response")
+                print(f"Response content: {response}")
+                return []

-            # Execute the query
-            response = conn.search(index=self.es_config['index_name'], body=query)
-            return response
         except Exception as e:
-            logger.error(f"Vector search failed: {str(e)}")
-            raise
+            print(f"Vector search failed: {e}")
+            return []
         finally:
             # Release the connection back to the pool
             self.es_pool.release_connection(conn)
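Note: the rewritten rerank_results drops the document/score fallback fields and reads only results[*].index and results[*].relevance_score, validating each index against valid_results. The request and response shape the new code assumes (values illustrative; the endpoint and model come from Config):

```python
# Request body as built by rerank_results:
rerank_data = {
    "model": "example-reranker",          # placeholder for Config.RERANK_MODEL
    "query": "concrete",
    "documents": ["chunk a", "chunk b"],  # user_input text of the valid hits
    "top_n": 2,
}
# Response shape the parser expects:
# {"results": [{"index": 1, "relevance_score": 0.93},
#              {"index": 0, "relevance_score": 0.41}]}
```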
@@ -404,11 +424,53 @@ class EsSearchUtil:
             return

         print(f"Found {len(results)} results:\n")
-        for i, (result, score) in enumerate(results, 1):
+        for i, item in enumerate(results, 1):
             print(f"Result {i}:")
-            print(f"Content: {result['_source']['user_input']}")
+            try:
+                # Check whether item is a (result, score) tuple
+                if isinstance(item, tuple):
+                    if len(item) >= 2:
+                        result, score = item[0], item[1]
+                    else:
+                        result, score = item[0], 0.0
+                else:
+                    # Not a tuple, so assume item itself is the result
+                    result = item
+                    score = result.get('_score', 0.0)
+
+                # Make sure result is a dict
+                if not isinstance(result, dict):
+                    print(f"Warning: result item {i} is not a dict, skipping display")
+                    print(f"Result item content: {result}")
+                    print("---")
+                    continue
+
+                # Try to get the user_input content
+                if '_source' in result and 'user_input' in result['_source']:
+                    content = result['_source']['user_input']
+                    print(f"Content: {content}")
+                elif 'user_input' in result:
+                    content = result['user_input']
+                    print(f"Content: {content}")
+                else:
+                    print(f"Warning: result item {i} is missing the 'user_input' field")
+                    print(f"Result item content: {result}")
+                    print("---")
+                    continue
+
+                # Show the score
                 if show_score:
                     print(f"Score: {score:.4f}")
+
+                # Show tag info if present
+                if '_source' in result and 'tags' in result['_source']:
+                    tags = result['_source']['tags']
+                    if isinstance(tags, dict) and 'tags' in tags:
+                        print(f"Tags: {tags['tags']}")
+
+            except Exception as e:
+                print(f"Error while processing result item {i}: {str(e)}")
+                print(f"Result item content: {item}")
             print("---")

     def merge_results(self, keyword_results: List[Tuple[Dict, float]], vector_results: List[Tuple[Dict, float]]) -> List[Tuple[Dict, float, str]]:
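Note: the display loop is widened because the refactor can now feed it two shapes: (hit, score) tuples from rerank_results and bare hit dicts from search_by_vector. The normalization it performs, condensed as a sketch (not the committed code verbatim):

```python
def unpack(item):
    # (hit, score) tuple from rerank_results, or a bare ES hit dict.
    if isinstance(item, tuple):
        return item[0], (item[1] if len(item) >= 2 else 0.0)
    return item, item.get('_score', 0.0)  # bare hit: read the score off the hit
```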
dsSchoolBuddy/ElasticSearch/Utils/VectorDBUtil.py (new file, 125 lines)
@@ -0,0 +1,125 @@
+# pip install pydantic requests
+from langchain_core.documents import Document
+from langchain_core.vectorstores import InMemoryVectorStore
+from langchain_openai import OpenAIEmbeddings
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from pydantic import SecretStr
+import requests
+import json
+from Config.Config import (
+    EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
+    RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
+)
+
+
+class VectorDBUtil:
+    """Vector database utility class: text embedding storage and querying"""
+
+    def __init__(self):
+        """Initialize the vector database utility"""
+        # Initialize the embedding model
+        self.embeddings = OpenAIEmbeddings(
+            model=EMBED_MODEL_NAME,
+            base_url=EMBED_BASE_URL,
+            api_key=SecretStr(EMBED_API_KEY)  # wrap as SecretStr
+        )
+        # Initialize the vector store
+        self.vector_store = None
+
+    def text_to_vector_db(self, text: str, chunk_size: int = 200, chunk_overlap: int = 0) -> tuple:
+        """
+        Store text in the vector database
+
+        Args:
+            text: the text to ingest
+            chunk_size: chunk size for text splitting
+            chunk_overlap: overlap between adjacent chunks
+
+        Returns:
+            tuple: (vector store object, document count, number of split chunks)
+        """
+        # Create the document objects
+        docs = [Document(page_content=text, metadata={"source": "simulated_document"})]
+        doc_count = len(docs)
+        print(f"Document count: {doc_count}")
+
+        # Split the documents
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
+        )
+        all_splits = text_splitter.split_documents(docs)
+        split_count = len(all_splits)
+        print(f"Chunk count after splitting: {split_count}")
+
+        # Vector storage
+        self.vector_store = InMemoryVectorStore(self.embeddings)
+        ids = self.vector_store.add_documents(documents=all_splits)
+
+        return self.vector_store, doc_count, split_count
+
+    def query_vector_db(self, query: str, k: int = 4) -> list:
+        """
+        Query text from the vector database
+
+        Args:
+            query: the query string
+            k: number of results to return
+
+        Returns:
+            list: reranked results; each element is a (document object, confidence score) tuple
+        """
+        if not self.vector_store:
+            print("Error: vector store not initialized; call text_to_vector_db first")
+            return []
+
+        # Vector query: fetch more results to be reranked
+        results = self.vector_store.similarity_search(query, k=k)
+        print(f"Vector search result count: {len(results)}")
+
+        # Holds the reranked documents and scores
+        reranked_docs_with_scores = []
+
+        # Call the rerank model
+        if len(results) > 1:
+            # Prepare the rerank request payload
+            rerank_data = {
+                "model": RERANK_MODEL,
+                "query": query,
+                "documents": [doc.page_content for doc in results],
+                "top_n": len(results)
+            }
+
+            # Call the SiliconFlow API to rerank
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
+            }
+
+            try:
+                response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
+                response.raise_for_status()  # raise if the request failed
+                rerank_result = response.json()
+
+                # Process the rerank results, extracting relevance_score
+                if "results" in rerank_result:
+                    for item in rerank_result["results"]:
+                        doc_idx = item.get("index")
+                        score = item.get("relevance_score", 0.0)
+                        if 0 <= doc_idx < len(results):
+                            reranked_docs_with_scores.append((results[doc_idx], score))
+                else:
+                    print("Warning: unrecognized rerank API response format")
+                    reranked_docs_with_scores = [(doc, 0.0) for doc in results]
+
+                print(f"Result count after rerank: {len(reranked_docs_with_scores)}")
+            except Exception as e:
+                print(f"Rerank model call failed: {e}")
+                print("Falling back to the original search results")
+                reranked_docs_with_scores = [(doc, 0.0) for doc in results]
+        else:
+            # Only one result, no rerank needed
+            reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # confidence for a single result is set to 1.0
+
+        return reranked_docs_with_scores
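Note: a quick end-to-end sanity check of the new class, assuming Config.Config supplies reachable EMBED_* and RERANK_* endpoints; a single short text yields one chunk, so query_vector_db takes the no-rerank branch and reports confidence 1.0:

```python
from ElasticSearch.Utils.VectorDBUtil import VectorDBUtil

util = VectorDBUtil()
store, n_docs, n_chunks = util.text_to_vector_db("Concrete is a composite building material.")
print(n_docs, n_chunks)                       # expect 1 and 1 for such a short text
print(util.query_vector_db("concrete", k=1))  # [(Document(...), 1.0)] via the single-result branch
```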
@@ -1,115 +0,0 @@
-# pip install pydantic requests
-from langchain_core.documents import Document
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import OpenAIEmbeddings
-from langchain_text_splitters import RecursiveCharacterTextSplitter
-from pydantic import SecretStr
-import requests
-import json
-from Config.Config import (
-    EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
-    RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
-)
-
-
-def text_to_vector_db(text: str, chunk_size: int = 200, chunk_overlap: int = 0) -> tuple:
-    """
-    Store text in the vector database
-
-    Args:
-        text: the text to ingest
-        chunk_size: chunk size for text splitting
-        chunk_overlap: overlap between adjacent chunks
-
-    Returns:
-        tuple: (vector store object, document count, number of split chunks)
-    """
-    # Create the document objects
-    docs = [Document(page_content=text, metadata={"source": "simulated_document"})]
-    doc_count = len(docs)
-    print(f"Document count: {doc_count}")
-
-    # Split the documents
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
-    )
-    all_splits = text_splitter.split_documents(docs)
-    split_count = len(all_splits)
-    print(f"Chunk count after splitting: {split_count}")
-
-    # Embedding model
-    embeddings = OpenAIEmbeddings(
-        model=EMBED_MODEL_NAME,
-        base_url=EMBED_BASE_URL,
-        api_key=SecretStr(EMBED_API_KEY)  # wrap as SecretStr
-    )
-
-    # Vector storage
-    vector_store = InMemoryVectorStore(embeddings)
-    ids = vector_store.add_documents(documents=all_splits)
-
-    return vector_store, doc_count, split_count
-
-
-def query_vector_db(vector_store: InMemoryVectorStore, query: str, k: int = 4) -> list:
-    """
-    Query text from the vector database
-
-    Args:
-        vector_store: the vector store object
-        query: the query string
-        k: number of results to return
-
-    Returns:
-        list: reranked results; each element is a (document object, confidence score) tuple
-    """
-    # Vector query: fetch more results to be reranked
-    results = vector_store.similarity_search(query, k=k)
-    print(f"Vector search result count: {len(results)}")
-
-    # Holds the reranked documents and scores
-    reranked_docs_with_scores = []
-
-    # Call the rerank model
-    if len(results) > 1:
-        # Prepare the rerank request payload
-        rerank_data = {
-            "model": RERANK_MODEL,
-            "query": query,
-            "documents": [doc.page_content for doc in results],
-            "top_n": len(results)
-        }
-
-        # Call the SiliconFlow API to rerank
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
-        }
-
-        try:
-            response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
-            response.raise_for_status()  # raise if the request failed
-            rerank_result = response.json()
-
-            # Process the rerank results, extracting relevance_score
-            if "results" in rerank_result:
-                for item in rerank_result["results"]:
-                    doc_idx = item.get("index")
-                    score = item.get("relevance_score", 0.0)
-                    if 0 <= doc_idx < len(results):
-                        reranked_docs_with_scores.append((results[doc_idx], score))
-            else:
-                print("Warning: unrecognized rerank API response format")
-                reranked_docs_with_scores = [(doc, 0.0) for doc in results]
-
-            print(f"Result count after rerank: {len(reranked_docs_with_scores)}")
-        except Exception as e:
-            print(f"Rerank model call failed: {e}")
-            print("Falling back to the original search results")
-            reranked_docs_with_scores = [(doc, 0.0) for doc in results]
-    else:
-        # Only one result, no rerank needed
-        reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # confidence for a single result is set to 1.0
-
-    return reranked_docs_with_scores
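Note: migrating off the deleted module is mechanical; the vector_store handle that used to be threaded through the functions now lives on the instance:

```python
# Old (deleted module-level API):
#     vector_store, doc_count, split_count = text_to_vector_db(long_text)
#     results = query_vector_db(vector_store, query, k=4)
# New (class API):
#     util = VectorDBUtil()
#     util.text_to_vector_db(long_text)          # store kept on util.vector_store
#     results = util.query_vector_db(query, k=4)
```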