parent
cbb5b56623
commit
5056cfe2c2
@ -0,0 +1,127 @@
|
||||
import os
|
||||
import logging
|
||||
from logging.handlers import RotatingFileHandler
|
||||
|
||||
import jieba
|
||||
from gensim.models import KeyedVectors
|
||||
|
||||
from Config.Config import ES_CONFIG, MS_MODEL_PATH, MS_MODEL_LIMIT
|
||||
from ElasticSearch.Utils.ElasticsearchConnectionPool import ElasticsearchConnectionPool
|
||||
|
||||
# --- Logging setup ---
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Ensure the log directory exists before attaching the file handler.
os.makedirs('Logs', exist_ok=True)
# encoding="utf-8" so the Chinese log messages are not mangled on platforms
# whose default locale encoding is not UTF-8 (e.g. Windows cp936/gbk).
handler = RotatingFileHandler(
    'Logs/start.log',
    maxBytes=1024 * 1024,
    backupCount=5,
    encoding='utf-8',
)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)

# --- Load the pre-trained Word2Vec model (module-level side effect) ---
# limit= caps the number of vocabulary entries loaded to bound memory usage.
model = KeyedVectors.load_word2vec_format(MS_MODEL_PATH, binary=False, limit=MS_MODEL_LIMIT)
# Lazy %-style args: the message is only formatted if the record is emitted.
logger.info("模型加载成功,词向量维度: %s", model.vector_size)
|
||||
|
||||
|
||||
|
||||
def init_es_pool(max_connections=50):
    """Create and return an Elasticsearch connection pool.

    Connection settings (hosts, auth, TLS verification) come from
    ``ES_CONFIG``.

    Args:
        max_connections: Upper bound on pooled connections. Defaults to 50,
            matching the previously hard-coded value, so existing callers
            are unaffected.

    Returns:
        A ready-to-use ``ElasticsearchConnectionPool``.
    """
    es_pool = ElasticsearchConnectionPool(
        hosts=ES_CONFIG["hosts"],
        basic_auth=ES_CONFIG["basic_auth"],
        verify_certs=ES_CONFIG["verify_certs"],
        max_connections=max_connections,
    )
    logger.info("Elasticsearch连接池初始化完成")
    return es_pool
|
||||
|
||||
|
||||
# Convert text into an embedding vector.
def text_to_embedding(text):
    """Convert *text* into a fixed-size embedding vector.

    The text is segmented with jieba; the embedding is the element-wise
    mean of the word vectors of all in-vocabulary tokens.

    Returns:
        ``list[float]`` of length ``model.vector_size`` on every path.
        Previously the success path returned a numpy array while the
        fallback paths returned a list; the array would also fail JSON
        serialization when embedded into an Elasticsearch query body.
        A zero vector is returned when no token is in the vocabulary or
        when conversion fails.
    """
    words = jieba.lcut(text)  # jieba word segmentation
    print(f"文本: {text}, 分词结果: {words}")
    try:
        embeddings = [model[word] for word in words if word in model]
        logger.info("有效词向量数量: %s", len(embeddings))
        if embeddings:
            avg_embedding = sum(embeddings) / len(embeddings)
            logger.info("生成的平均向量: %s...", avg_embedding[:5])  # first 5 dims
            # .tolist() keeps the return type a plain list on all paths.
            return avg_embedding.tolist()
        logger.warning("未找到有效词,返回零向量")
        return [0.0] * model.vector_size
    except Exception as e:
        # Broad catch at this boundary: a failed conversion degrades to a
        # zero vector instead of aborting the whole query.
        logger.error("向量转换失败: %s", str(e))
        return [0.0] * model.vector_size
|
||||
|
||||
|
||||
def _vector_search(es_conn, query_embedding, size=5):
    """Run a cosine-similarity ``script_score`` search on the embedding index."""
    return es_conn.search(
        index=ES_CONFIG['index_name'],
        body={
            "query": {
                "script_score": {
                    "query": {"match_all": {}},
                    "script": {
                        # Clamp negative similarities to 0: Elasticsearch
                        # rejects negative scores from script_score.
                        "source": "double score = cosineSimilarity(params.query_vector, 'embedding'); return score >= 0 ? score : 0",
                        "params": {"query_vector": query_embedding}
                    }
                }
            },
            "size": size
        }
    )


def _text_search(es_conn, query, size=5):
    """Run an exact keyword match for *query* against the raw_texts index."""
    return es_conn.search(
        index="raw_texts",
        body={
            "query": {
                "match": {
                    "text.keyword": query
                }
            },
            "size": size
        }
    )


def main():
    """Entry point: run one hybrid (vector + exact-text) demo query against ES."""
    # Initialise the ES connection pool.
    es_pool = init_es_pool()
    try:
        # Test query.
        query = "小学数学教学中的若干问题"
        print("\n=== 开始执行查询 ===")
        print(f"原始查询文本: {query}")

        # Run the hybrid search on a pooled connection.
        es_conn = es_pool.get_connection()
        try:
            # Vector search phase.
            print("\n=== 向量搜索阶段 ===")
            print("1. 文本分词和向量化处理中...")
            query_embedding = text_to_embedding(query)
            print(f"2. 生成的查询向量维度: {len(query_embedding)}")
            print(f"3. 前5维向量值: {query_embedding[:5]}")

            print("4. 正在执行Elasticsearch向量搜索...")
            vector_results = _vector_search(es_conn, query_embedding)
            print(f"5. 向量搜索结果数量: {len(vector_results['hits']['hits'])}")

            # Exact text search phase.
            print("\n=== 文本精确搜索阶段 ===")
            print("1. 正在执行Elasticsearch文本精确搜索...")
            text_results = _text_search(es_conn, query)
            print(f"2. 文本搜索结果数量: {len(text_results['hits']['hits'])}")

            # Print detailed results.
            print("\n=== 最终搜索结果 ===")
            print("向量搜索结果:")
            for i, hit in enumerate(vector_results['hits']['hits']):
                print(f"  {i+1}. 文档ID: {hit['_id']}, 相似度分数: {hit['_score']:.2f}")

            print("\n文本精确搜索结果:")
            for i, hit in enumerate(text_results['hits']['hits']):
                print(f"  {i+1}. 文档ID: {hit['_id']}, 匹配分数: {hit['_score']:.2f}")
        finally:
            es_pool.release_connection(es_conn)
    finally:
        # Previously close() ran only on the success path: any exception in
        # the searches propagated past it and leaked the pool. Closing in a
        # finally guarantees cleanup on every path.
        es_pool.close()
|
||||
|
||||
# Script entry point: only run the demo query when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
|
Loading…
Reference in new issue