diff --git a/dsRag/Start.py b/dsRag/Start.py
index 587a86f4..ca0b5094 100644
--- a/dsRag/Start.py
+++ b/dsRag/Start.py
@@ -114,11 +114,11 @@ async def rag(request: fastapi.Request):
     query = data.get('query', '')
     query_tags = data.get('tags', [])
     # 调用es进行混合搜索
-    search_results = queryByEs(query, query_tags, logger)
+    search_results = EsSearchUtil.queryByEs(query, query_tags, logger)
     # 构建提示词
     context = "\n".join([
         f"结果{i + 1}: {res['tags']['full_content']}"
-        for i, res in enumerate(search_results['vector_results'] + search_results['text_results'])
+        for i, res in enumerate(search_results['text_results'])
     ])
     # 添加图片识别提示
     prompt = f"""
diff --git a/dsRag/Util/EsSearchUtil.py b/dsRag/Util/EsSearchUtil.py
index 21fe14e5..09d1c569 100644
--- a/dsRag/Util/EsSearchUtil.py
+++ b/dsRag/Util/EsSearchUtil.py
@@ -201,47 +201,23 @@ class EsSearchUtil:
             )
             logger.info(f"2. 文本搜索结果数量: {len(text_results['hits']['hits'])}")
 
-            # 合并结果
-            logger.info("\n=== 最终搜索结果 ===")
-            logger.info(f"向量搜索结果: {len(vector_results['hits']['hits'])}条")
-            for i, hit in enumerate(vector_results['hits']['hits'], 1):
-                logger.info(f"  {i}. 文档ID: {hit['_id']}, 相似度分数: {hit['_score']:.2f}")
-                logger.info(f"     内容: {hit['_source']['user_input']}")
-
-            logger.info("文本精确搜索结果:")
-            for i, hit in enumerate(text_results['hits']['hits']):
-                logger.info(f"  {i + 1}. 文档ID: {hit['_id']}, 匹配分数: {hit['_score']:.2f}")
-                logger.info(f"     内容: {hit['_source']['user_input']}")
-
-            # 去重处理:去除vector_results和text_results中重复的user_input
-            vector_sources = [hit['_source'] for hit in vector_results['hits']['hits']]
-            text_sources = [hit['_source'] for hit in text_results['hits']['hits']]
-
-            # 构建去重后的结果
-            unique_text_sources = []
-            text_user_inputs = set()
-
-            # 先处理text_results,保留所有
-            for source in text_sources:
-                text_user_inputs.add(source['user_input'])
-                unique_text_sources.append(source)
-
-            # 处理vector_results,只保留不在text_results中的
-            unique_vector_sources = []
-            for source in vector_sources:
-                if source['user_input'] not in text_user_inputs:
-                    unique_vector_sources.append(source)
-
-            # 计算优化掉的记录数量和节约的tokens
-            removed_count = len(vector_sources) - len(unique_vector_sources)
-            saved_tokens = sum(len(source['user_input']) for source in vector_sources
-                               if source['user_input'] in text_user_inputs)
-
-            logger.info(f"优化掉 {removed_count} 条重复记录,节约约 {saved_tokens} tokens")
+            # 合并vector和text结果
+            all_sources = [hit['_source'] for hit in vector_results['hits']['hits']] + \
+                          [hit['_source'] for hit in text_results['hits']['hits']]
+
+            # 去重处理
+            unique_sources = []
+            seen_user_inputs = set()
+
+            for source in all_sources:
+                if source['user_input'] not in seen_user_inputs:
+                    seen_user_inputs.add(source['user_input'])
+                    unique_sources.append(source)
+
+            logger.info(f"合并后去重结果数量: {len(unique_sources)}条")
 
             search_results = {
-                "vector_results": unique_vector_sources,
-                "text_results": unique_text_sources
+                "text_results": unique_sources
            }
             return search_results
         finally:
diff --git a/dsRag/Util/__pycache__/EsSearchUtil.cpython-310.pyc b/dsRag/Util/__pycache__/EsSearchUtil.cpython-310.pyc
index 5b37225a..b8660cd0 100644
Binary files a/dsRag/Util/__pycache__/EsSearchUtil.cpython-310.pyc and b/dsRag/Util/__pycache__/EsSearchUtil.cpython-310.pyc differ
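
Note on the behavioural change in this patch: queryByEs now merges vector and text hits into a single de-duplicated list keyed by user_input and returns it under the lone text_results key, so the caller in Start.py no longer concatenates vector_results + text_results. The standalone sketch below reproduces that merge-and-dedup logic outside Elasticsearch; the helper name merge_and_dedup and the sample hits are illustrative only, while the field names (_source, user_input, tags['full_content']) mirror the patched code.

    def merge_and_dedup(vector_hits, text_hits):
        """Merge vector and text hits, keeping the first occurrence of each user_input."""
        # Same ordering as the patched queryByEs: vector hits first, then text hits.
        all_sources = [hit['_source'] for hit in vector_hits] + \
                      [hit['_source'] for hit in text_hits]

        unique_sources = []
        seen_user_inputs = set()
        for source in all_sources:
            if source['user_input'] not in seen_user_inputs:
                seen_user_inputs.add(source['user_input'])
                unique_sources.append(source)

        # Everything is exposed under a single key, which is why Start.py
        # now reads only search_results['text_results'].
        return {"text_results": unique_sources}


    if __name__ == "__main__":
        # Illustrative hits only; real ones come from the Elasticsearch responses.
        vector_hits = [{"_source": {"user_input": "q1", "tags": {"full_content": "A"}}}]
        text_hits = [
            {"_source": {"user_input": "q1", "tags": {"full_content": "A"}}},  # duplicate, dropped
            {"_source": {"user_input": "q2", "tags": {"full_content": "B"}}},
        ]
        results = merge_and_dedup(vector_hits, text_hits)

        # Context construction mirrors the patched Start.py.
        context = "\n".join(
            f"结果{i + 1}: {res['tags']['full_content']}"
            for i, res in enumerate(results["text_results"])
        )
        print(context)  # -> 结果1: A / 结果2: B

One side effect worth noting: because vector sources are placed first in all_sources, a duplicated user_input now keeps the vector copy of the document, whereas the removed code kept the text copy and filtered the vector side; when the duplicated _source documents are identical the downstream prompt is unaffected.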