From 328882505e6011dd60b2f503f8fb92aed84dbba5 Mon Sep 17 00:00:00 2001
From: HuangHai <10402852@qq.com>
Date: Tue, 19 Aug 2025 10:41:58 +0800
Subject: [PATCH] 'commit'

---
 dsSchoolBuddy/ElasticSearch/T2_Vector.py     |   9 +-
 dsSchoolBuddy/ElasticSearch/T3_InsertData.py |   2 -
 .../ElasticSearch/T7_XiangLiangQuery.py      |   3 +-
 .../ElasticSearch/Utils/EsSearchUtil.py      | 262 +++++++++++-------
 .../ElasticSearch/Utils/VectorDBUtil.py      | 125 +++++++++
 .../ElasticSearch/Utils/VectorUtil.py        | 115 --------
 .../__pycache__/EsSearchUtil.cpython-310.pyc | Bin 11475 -> 12197 bytes
 .../__pycache__/VectorDBUtil.cpython-310.pyc | Bin 0 -> 4136 bytes
 8 files changed, 294 insertions(+), 222 deletions(-)
 create mode 100644 dsSchoolBuddy/ElasticSearch/Utils/VectorDBUtil.py
 delete mode 100644 dsSchoolBuddy/ElasticSearch/Utils/VectorUtil.py
 create mode 100644 dsSchoolBuddy/ElasticSearch/Utils/__pycache__/VectorDBUtil.cpython-310.pyc

diff --git a/dsSchoolBuddy/ElasticSearch/T2_Vector.py b/dsSchoolBuddy/ElasticSearch/T2_Vector.py
index 774e3599..c805b7c3 100644
--- a/dsSchoolBuddy/ElasticSearch/T2_Vector.py
+++ b/dsSchoolBuddy/ElasticSearch/T2_Vector.py
@@ -1,5 +1,5 @@
 # pip install pydantic requests
-from ElasticSearch.Utils.VectorUtil import text_to_vector_db, query_vector_db
+from ElasticSearch.Utils.VectorDBUtil import VectorDBUtil
 
 
 def main():
@@ -16,12 +16,15 @@ def main():
 
     随着建筑技术的发展,高性能混凝土、自密实混凝土、再生骨料混凝土等新型混凝土不断涌现,为土木工程领域提供了更多的选择。"""
 
+    # 创建工具实例
+    vector_util = VectorDBUtil()
+
     # 调用文本入库功能
-    vector_store, doc_count, split_count = text_to_vector_db(long_text)
+    vector_util.text_to_vector_db(long_text)
 
     # 调用文本查询功能
     query = "混凝土"
-    reranked_results = query_vector_db(vector_store, query, k=4)
+    reranked_results = vector_util.query_vector_db(query, k=4)
 
     # 打印所有查询结果及其可信度
     print("最终查询结果:")
diff --git a/dsSchoolBuddy/ElasticSearch/T3_InsertData.py b/dsSchoolBuddy/ElasticSearch/T3_InsertData.py
index 63985aa7..a5c0e76d 100644
--- a/dsSchoolBuddy/ElasticSearch/T3_InsertData.py
+++ b/dsSchoolBuddy/ElasticSearch/T3_InsertData.py
@@ -1,5 +1,3 @@
-import warnings
-
 from Config import Config
 from ElasticSearch.Utils.EsSearchUtil import EsSearchUtil
 
diff --git a/dsSchoolBuddy/ElasticSearch/T7_XiangLiangQuery.py b/dsSchoolBuddy/ElasticSearch/T7_XiangLiangQuery.py
index 7e9cfc90..f91115bb 100644
--- a/dsSchoolBuddy/ElasticSearch/T7_XiangLiangQuery.py
+++ b/dsSchoolBuddy/ElasticSearch/T7_XiangLiangQuery.py
@@ -32,8 +32,7 @@ if __name__ == "__main__":
         print(f"3. 前3维向量值: {query_embedding[:3]}")
 
         print("4. 正在执行Elasticsearch向量搜索...")
-        vector_results = search_util.search_by_vector(query_embedding, k=5)
-        vector_hits = vector_results['hits']['hits']
+        vector_hits = search_util.search_by_vector(query_embedding, k=5)
         print(f"5. 向量搜索结果数量: {len(vector_hits)}")
 
         # 向量结果重排
diff --git a/dsSchoolBuddy/ElasticSearch/Utils/EsSearchUtil.py b/dsSchoolBuddy/ElasticSearch/Utils/EsSearchUtil.py
index 55b40a26..717cb22b 100644
--- a/dsSchoolBuddy/ElasticSearch/Utils/EsSearchUtil.py
+++ b/dsSchoolBuddy/ElasticSearch/Utils/EsSearchUtil.py
@@ -185,33 +185,33 @@ class EsSearchUtil:
 
         # 2. 从连接池获取连接
         conn = search_util.es_pool.get_connection()
 
-        # 3. 检查索引是否存在,不存在则创建
+        # # 3. 检查索引是否存在,不存在则创建
         index_name = Config.ES_CONFIG['index_name']
-        if not conn.indices.exists(index=index_name):
-            # 定义mapping结构
-            mapping = {
-                "mappings": {
-                    "properties": {
-                        "embedding": {
-                            "type": "dense_vector",
-                            "dims": 1024,  # 根据实际embedding维度调整
-                            "index": True,
-                            "similarity": "l2_norm"
-                        },
-                        "user_input": {"type": "text"},
-                        "tags": {
-                            "type": "object",
-                            "properties": {
-                                "tags": {"type": "keyword"},
-                                "full_content": {"type": "text"}
-                            }
-                        },
-                        "timestamp": {"type": "date"}
-                    }
-                }
-            }
-            conn.indices.create(index=index_name, body=mapping)
-            print(f"索引 '{index_name}' 创建成功")
+        # if not conn.indices.exists(index=index_name):
+        #     # 定义mapping结构
+        #     mapping = {
+        #         "mappings": {
+        #             "properties": {
+        #                 "embedding": {
+        #                     "type": "dense_vector",
+        #                     "dims": Config.EMBED_DIM,  # 根据实际embedding维度调整
+        #                     "index": True,
+        #                     "similarity": "l2_norm"
+        #                 },
+        #                 "user_input": {"type": "text"},
+        #                 "tags": {
+        #                     "type": "object",
+        #                     "properties": {
+        #                         "tags": {"type": "keyword"},
+        #                         "full_content": {"type": "text"}
+        #                     }
+        #                 },
+        #                 "timestamp": {"type": "date"}
+        #             }
+        #         }
+        #     }
+        #     conn.indices.create(index=index_name, body=mapping)
+        #     print(f"索引 '{index_name}' 创建成功")
 
         # 4. 切割文本
         text_chunks = self.split_text_into_chunks(long_text)
@@ -285,108 +285,128 @@ class EsSearchUtil:
             query_embedding = embeddings.embed_query(query)
             return query_embedding
 
-    def rerank_results(self, query: str, results: List[Dict]) -> List[Tuple[Dict, float]]:
+    def rerank_results(self, query: str, results: list) -> list:
         """
-        对搜索结果进行重排
+        使用重排模型对搜索结果进行重排
 
         参数:
             query: 查询文本
             results: 搜索结果列表
 
         返回:
-            list: 重排后的结果列表,每个元素是(文档, 分数)元组
+            list: 重排后的结果列表,每个元素是(文档对象, 分数)的元组
         """
-        if len(results) <= 1:
-            return [(doc, 1.0) for doc in results]
-
-        # 准备重排请求数据
-        rerank_data = {
-            "model": Config.RERANK_MODEL,
-            "query": query,
-            "documents": [doc['_source']['user_input'] for doc in results],
-            "top_n": len(results)
-        }
-
-        # 调用API进行重排
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {Config.RERANK_BINDING_API_KEY}"
-        }
+        if not results:
+            print("警告: 没有搜索结果可供重排")
+            return []
 
         try:
+            # 准备重排请求数据
+            # 确保doc是字典并包含'_source'和'user_input'字段
+            documents = []
+            valid_results = []
+            for i, doc in enumerate(results):
+                if isinstance(doc, dict) and '_source' in doc and 'user_input' in doc['_source']:
+                    documents.append(doc['_source']['user_input'])
+                    valid_results.append(doc)
+                else:
+                    print(f"警告: 结果项 {i} 格式不正确,跳过该结果")
+                    print(f"结果项内容: {doc}")
+
+            if not documents:
+                print("警告: 没有有效的文档可供重排")
+                # 返回原始结果,但转换为(结果, 分数)的元组格式
+                return [(doc, doc.get('_score', 0.0)) for doc in results]
+
+            rerank_data = {
+                "model": Config.RERANK_MODEL,
+                "query": query,
+                "documents": documents,
+                "top_n": len(documents)
+            }
+
+            # 调用重排API
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {Config.RERANK_BINDING_API_KEY}"
+            }
+
             response = requests.post(Config.RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
-            response.raise_for_status()
+            response.raise_for_status()  # 检查请求是否成功
             rerank_result = response.json()
 
             # 处理重排结果
-            reranked_docs_with_scores = []
+            reranked_results = []
             if "results" in rerank_result:
                 for item in rerank_result["results"]:
-                    # 尝试获取index和relevance_score字段
                     doc_idx = item.get("index")
                     score = item.get("relevance_score", 0.0)
-
-                    # 如果找不到,尝试fallback到document和score字段
-                    if doc_idx is None:
-                        doc_idx = item.get("document")
-                        if score == 0.0:
-                            score = item.get("score", 0.0)
+                    if doc_idx is not None and 0 <= doc_idx < len(valid_results):
+                        result = valid_results[doc_idx]
+                        reranked_results.append((result, score))
+            else:
+                print("警告: 无法识别重排API响应格式")
+                # 返回原始结果,但转换为(结果, 分数)的元组格式
+                reranked_results = [(doc, doc.get('_score', 0.0)) for doc in valid_results]
 
-                    if doc_idx is not None and 0 <= doc_idx < len(results):
-                        reranked_docs_with_scores.append((results[doc_idx], score))
-                        logger.debug(f"重排结果: 文档索引={doc_idx}, 分数={score}")
-                    else:
-                        logger.warning(f"重排结果项索引无效: {doc_idx}")
+            print(f"重排后结果数量:{len(reranked_results)}")
+            return reranked_results
 
-            # 如果没有有效的重排结果,返回原始结果
-            if not reranked_docs_with_scores:
-                logger.warning("没有获取到有效的重排结果,返回原始结果")
-                return [(doc, 1.0) for doc in results]
-
-            return reranked_docs_with_scores
         except Exception as e:
-            logger.error(f"重排失败: {str(e)}")
-            return [(doc, 1.0) for doc in results]
+            print(f"重排失败: {e}")
+            print("将使用原始搜索结果")
+            # 返回原始结果,但转换为(结果, 分数)的元组格式
+            return [(doc, doc.get('_score', 0.0)) for doc in results]
 
-    def search_by_vector(self, query_embedding: list, k: int = 10) -> dict:
+    def search_by_vector(self, query_embedding: list, k: int = 10) -> list:
         """
-        在Elasticsearch中按向量搜索
+        根据向量进行相似性搜索
 
         参数:
             query_embedding: 查询向量
-            k: 返回结果数量
+            k: 返回的结果数量
 
         返回:
-            dict: 搜索结果
+            list: 搜索结果列表
         """
-        # 从连接池获取连接
-        conn = self.es_pool.get_connection()
+        # 从连接池获取连接(在try之外获取,避免获取失败时finally中引用未定义的conn)
+        conn = self.es_pool.get_connection()
         try:
-            # 构建向量搜索查询
-            query = {
-                "query": {
-                    "script_score": {
-                        "query": {
-                            "bool": {
-                                "should": [],
-                                "minimum_should_match": 0
-                            }
-                        },
-                        "script": {
-                            "source": "double score = cosineSimilarity(params.query_vector, 'embedding'); return score >= 0 ? score : 0",
-                            "params": {"query_vector": query_embedding}
-                        }
-                    }
-                },
-                "size": k
-            }
+            index_name = Config.ES_CONFIG['index_name']
+
+            # 执行向量搜索
+            response = conn.search(
+                index=index_name,
+                body={
+                    "query": {
+                        "script_score": {
+                            "query": {"match_all": {}},
+                            "script": {
+                                "source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
+                                "params": {
+                                    "query_vector": query_embedding
+                                }
+                            }
+                        }
+                    },
+                    "size": k
+                }
+            )
+
+            # 提取结果
+            # 确保我们提取的是 hits.hits 部分
+            if 'hits' in response and 'hits' in response['hits']:
+                results = response['hits']['hits']
+                print(f"向量搜索结果数量: {len(results)}")
+                return results
+            else:
+                print("警告: 向量搜索响应格式不正确")
+                print(f"响应内容: {response}")
+                return []
 
-            # 执行查询
-            response = conn.search(index=self.es_config['index_name'], body=query)
-            return response
         except Exception as e:
-            logger.error(f"向量搜索失败: {str(e)}")
-            raise
+            print(f"向量搜索失败: {e}")
+            return []
         finally:
             # 释放连接回连接池
             self.es_pool.release_connection(conn)
@@ -404,11 +424,53 @@
             return
 
         print(f"找到 {len(results)} 条结果:\n")
-        for i, (result, score) in enumerate(results, 1):
+        for i, item in enumerate(results, 1):
             print(f"结果 {i}:")
-            print(f"内容: {result['_source']['user_input']}")
-            if show_score:
-                print(f"分数: {score:.4f}")
+            try:
+                # 检查item是否为元组格式 (result, score)
+                if isinstance(item, tuple):
+                    if len(item) >= 2:
+                        result, score = item[0], item[1]
+                    else:
+                        result, score = item[0], 0.0
+                else:
+                    # 如果不是元组,假设item就是result
+                    result = item
+                    score = result.get('_score', 0.0)
+
+                # 确保result是字典类型
+                if not isinstance(result, dict):
+                    print(f"警告: 结果项 {i} 不是字典类型,跳过显示")
+                    print(f"结果项内容: {result}")
+                    print("---")
+                    continue
+
+                # 尝试获取user_input内容
+                if '_source' in result and 'user_input' in result['_source']:
+                    content = result['_source']['user_input']
+                    print(f"内容: {content}")
+                elif 'user_input' in result:
+                    content = result['user_input']
+                    print(f"内容: {content}")
+                else:
+                    print(f"警告: 结果项 {i} 缺少'user_input'字段")
+                    print(f"结果项内容: {result}")
+                    print("---")
+                    continue
+
+                # 显示分数
+                if show_score:
+                    print(f"分数: {score:.4f}")
+
+                # 如果有标签信息,也显示出来
+                if '_source' in result and 'tags' in result['_source']:
+                    tags = result['_source']['tags']
+                    if isinstance(tags, dict) and 'tags' in tags:
+                        print(f"标签: {tags['tags']}")
+
+            except Exception as e:
+                print(f"处理结果项 {i} 时出错: {str(e)}")
+                print(f"结果项内容: {item}")
             print("---")
 
     def merge_results(self, keyword_results: List[Tuple[Dict, float]], vector_results: List[Tuple[Dict, float]]) -> List[Tuple[Dict, float, str]]:
diff --git a/dsSchoolBuddy/ElasticSearch/Utils/VectorDBUtil.py b/dsSchoolBuddy/ElasticSearch/Utils/VectorDBUtil.py
new file mode 100644
index 00000000..2cb6ccd9
--- /dev/null
+++ b/dsSchoolBuddy/ElasticSearch/Utils/VectorDBUtil.py
@@ -0,0 +1,125 @@
+# pip install pydantic requests
+from langchain_core.documents import Document
+from langchain_core.vectorstores import InMemoryVectorStore
+from langchain_openai import OpenAIEmbeddings
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from pydantic import SecretStr
+import requests
+import json
+from Config.Config import (
+    EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
+    RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
+)
+
+
+class VectorDBUtil:
+    """向量数据库工具类,提供文本向量化存储和查询功能"""
+
+    def __init__(self):
+        """初始化向量数据库工具"""
+        # 初始化嵌入模型
+        self.embeddings = OpenAIEmbeddings(
+            model=EMBED_MODEL_NAME,
+            base_url=EMBED_BASE_URL,
+            api_key=SecretStr(EMBED_API_KEY)  # 包装成 SecretStr 类型
+        )
+        # 初始化向量存储
+        self.vector_store = None
+
+    def text_to_vector_db(self, text: str, chunk_size: int = 200, chunk_overlap: int = 0) -> tuple:
+        """
+        将文本存入向量数据库
+
+        参数:
+            text: 要入库的文本
+            chunk_size: 文本分割块大小
+            chunk_overlap: 文本块重叠大小
+
+        返回:
+            tuple: (向量存储对象, 文档数量, 分割后的文档块数量)
+        """
+        # 创建文档对象
+        docs = [Document(page_content=text, metadata={"source": "simulated_document"})]
+        doc_count = len(docs)
+        print(f"文档数量:{doc_count} 个")
+
+        # 切割文档
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
+        )
+        all_splits = text_splitter.split_documents(docs)
+        split_count = len(all_splits)
+        print(f"切割后的文档块数量:{split_count}")
+
+        # 向量存储
+        self.vector_store = InMemoryVectorStore(self.embeddings)
+        ids = self.vector_store.add_documents(documents=all_splits)
+
+        return self.vector_store, doc_count, split_count
+
+    def query_vector_db(self, query: str, k: int = 4) -> list:
+        """
+        从向量数据库查询文本
+
+        参数:
+            query: 查询字符串
+            k: 要返回的结果数量
+
+        返回:
+            list: 重排后的结果列表,每个元素是(文档对象, 可信度分数)的元组
+        """
+        if not self.vector_store:
+            print("错误: 向量数据库未初始化,请先调用text_to_vector_db方法")
+            return []
+
+        # 向量查询 - 获取更多结果用于重排
+        results = self.vector_store.similarity_search(query, k=k)
+        print(f"向量搜索结果数量:{len(results)}")
+
+        # 存储重排后的文档和分数
+        reranked_docs_with_scores = []
+
+        # 调用重排模型
+        if len(results) > 1:
+            # 准备重排请求数据
+            rerank_data = {
+                "model": RERANK_MODEL,
+                "query": query,
+                "documents": [doc.page_content for doc in results],
+                "top_n": len(results)
+            }
+
+            # 调用SiliconFlow API进行重排
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
+            }
+
+            try:
+                response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
+                response.raise_for_status()  # 检查请求是否成功
+                rerank_result = response.json()
+
+                # 处理重排结果,提取relevance_score
+                if "results" in rerank_result:
+                    for item in rerank_result["results"]:
+                        doc_idx = item.get("index")
+                        score = item.get("relevance_score", 0.0)
+                        if doc_idx is not None and 0 <= doc_idx < len(results):
+                            reranked_docs_with_scores.append((results[doc_idx], score))
+                else:
+                    print("警告: 无法识别重排API响应格式")
+                    reranked_docs_with_scores = [(doc, 0.0) for doc in results]
+
+                print(f"重排后结果数量:{len(reranked_docs_with_scores)}")
+            except Exception as e:
+                print(f"重排模型调用失败: {e}")
+                print("将使用原始搜索结果")
+                reranked_docs_with_scores = [(doc, 0.0) for doc in results]
+        else:
+            # 只有一个结果,无需重排
+            reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # 单个结果可信度设为1.0
+
+        return reranked_docs_with_scores
+
+
diff --git a/dsSchoolBuddy/ElasticSearch/Utils/VectorUtil.py b/dsSchoolBuddy/ElasticSearch/Utils/VectorUtil.py
deleted file mode 100644
index fb1a42c9..00000000
--- a/dsSchoolBuddy/ElasticSearch/Utils/VectorUtil.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# pip install pydantic requests
-from langchain_core.documents import Document
-from langchain_core.vectorstores import InMemoryVectorStore
-from langchain_openai import OpenAIEmbeddings
-from langchain_text_splitters import RecursiveCharacterTextSplitter
-from pydantic import SecretStr
-import requests
-import json
-from Config.Config import (
-    EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY,
-    RERANK_MODEL, RERANK_BASE_URL, RERANK_BINDING_API_KEY
-)
-
-def text_to_vector_db(text: str, chunk_size: int = 200, chunk_overlap: int = 0) -> tuple:
-    """
-    将文本存入向量数据库
-
-    参数:
-        text: 要入库的文本
-        chunk_size: 文本分割块大小
-        chunk_overlap: 文本块重叠大小
-
-    返回:
-        tuple: (向量存储对象, 文档数量, 分割后的文档块数量)
-    """
-    # 创建文档对象
-    docs = [Document(page_content=text, metadata={"source": "simulated_document"})]
-    doc_count = len(docs)
-    print(f"文档数量:{doc_count} 个")
-
-    # 切割文档
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
-    )
-    all_splits = text_splitter.split_documents(docs)
-    split_count = len(all_splits)
-    print(f"切割后的文档块数量:{split_count}")
-
-    # 嵌入模型
-    embeddings = OpenAIEmbeddings(
-        model=EMBED_MODEL_NAME,
-        base_url=EMBED_BASE_URL,
-        api_key=SecretStr(EMBED_API_KEY)  # 包装成 SecretStr 类型
-    )
-
-    # 向量存储
-    vector_store = InMemoryVectorStore(embeddings)
-    ids = vector_store.add_documents(documents=all_splits)
-
-    return vector_store, doc_count, split_count
-
-
-def query_vector_db(vector_store: InMemoryVectorStore, query: str, k: int = 4) -> list:
-    """
-    从向量数据库查询文本
-
-    参数:
-        vector_store: 向量存储对象
-        query: 查询字符串
-        k: 要返回的结果数量
-
-    返回:
-        list: 重排后的结果列表,每个元素是(文档对象, 可信度分数)的元组
-    """
-    # 向量查询 - 获取更多结果用于重排
-    results = vector_store.similarity_search(query, k=k)
-    print(f"向量搜索结果数量:{len(results)}")
-
-    # 存储重排后的文档和分数
-    reranked_docs_with_scores = []
-
-    # 调用重排模型
-    if len(results) > 1:
-        # 准备重排请求数据
-        rerank_data = {
-            "model": RERANK_MODEL,
-            "query": query,
-            "documents": [doc.page_content for doc in results],
-            "top_n": len(results)
-        }
-
-        # 调用SiliconFlow API进行重排
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
-        }
-
-        try:
-            response = requests.post(RERANK_BASE_URL, headers=headers, data=json.dumps(rerank_data))
-            response.raise_for_status()  # 检查请求是否成功
-            rerank_result = response.json()
-
-            # 处理重排结果,提取relevance_score
-            if "results" in rerank_result:
-                for item in rerank_result["results"]:
-                    doc_idx = item.get("index")
-                    score = item.get("relevance_score", 0.0)
-                    if 0 <= doc_idx < len(results):
-                        reranked_docs_with_scores.append((results[doc_idx], score))
-            else:
-                print("警告: 无法识别重排API响应格式")
-                reranked_docs_with_scores = [(doc, 0.0) for doc in results]
-
-            print(f"重排后结果数量:{len(reranked_docs_with_scores)}")
-        except Exception as e:
-            print(f"重排模型调用失败: {e}")
-            print("将使用原始搜索结果")
-            reranked_docs_with_scores = [(doc, 0.0) for doc in results]
-    else:
-        # 只有一个结果,无需重排
-        reranked_docs_with_scores = [(doc, 1.0) for doc in results]  # 单个结果可信度设为1.0
-
-    return reranked_docs_with_scores
-
-
diff --git a/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/EsSearchUtil.cpython-310.pyc b/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/EsSearchUtil.cpython-310.pyc
index f9158876c10cc14d8c160cf2c688b2e329422c67..baa3d6795d21468c4adf7c1e9841ea383bf3936c 100644
Binary files a/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/EsSearchUtil.cpython-310.pyc and b/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/EsSearchUtil.cpython-310.pyc differ
diff --git a/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/VectorDBUtil.cpython-310.pyc b/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/VectorDBUtil.cpython-310.pyc
new file mode 100644
Binary files /dev/null and b/dsSchoolBuddy/ElasticSearch/Utils/__pycache__/VectorDBUtil.cpython-310.pyc differ
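
Usage sketch (illustrative, not part of the commit): the new VectorDBUtil class replaces the module-level helpers from the deleted VectorUtil.py with instance state. The OpenAIEmbeddings client is built once in __init__, and text_to_vector_db() keeps the resulting InMemoryVectorStore on self, so query_vector_db() no longer takes the store as an argument. A minimal round trip, mirroring the updated T2_Vector.py and assuming Config/Config.py supplies the EMBED_* and RERANK_* settings:

    from ElasticSearch.Utils.VectorDBUtil import VectorDBUtil

    util = VectorDBUtil()

    # Ingest: split the text into 200-character chunks, embed them, store in memory.
    # Returns (vector_store, doc_count, split_count); callers may ignore the tuple,
    # as T2_Vector.py now does.
    util.text_to_vector_db("混凝土是土木工程中广泛使用的建筑材料……", chunk_size=200, chunk_overlap=0)

    # Query: returns (Document, relevance_score) tuples, already reranked.
    # Calling this before text_to_vector_db() prints an error and returns [].
    for doc, score in util.query_vector_db("混凝土", k=4):
        print(f"{score:.4f}  {doc.page_content[:40]}")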
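
A second sketch, with the same caveats, for the reworked Elasticsearch path: search_by_vector() now unwraps the response itself and returns response['hits']['hits'] (or [] on failure), which is why T7_XiangLiangQuery.py drops its own unwrapping line, and rerank_results() and display_results() both consume that hit list. The EsSearchUtil constructor call and the show_score keyword are assumptions here; the patch only shows method bodies:

    from Config import Config
    from Config.Config import EMBED_MODEL_NAME, EMBED_BASE_URL, EMBED_API_KEY
    from ElasticSearch.Utils.EsSearchUtil import EsSearchUtil
    from langchain_openai import OpenAIEmbeddings
    from pydantic import SecretStr

    search_util = EsSearchUtil(Config.ES_CONFIG)  # assumed constructor signature

    # Embed the query the same way the patched hunk does (embeddings.embed_query).
    embeddings = OpenAIEmbeddings(model=EMBED_MODEL_NAME, base_url=EMBED_BASE_URL,
                                  api_key=SecretStr(EMBED_API_KEY))
    query = "混凝土"
    query_embedding = embeddings.embed_query(query)

    vector_hits = search_util.search_by_vector(query_embedding, k=5)  # list of ES hit dicts
    reranked = search_util.rerank_results(query, vector_hits)         # [(hit, relevance_score), ...]
    search_util.display_results(reranked, show_score=True)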
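
Finally, both rerank paths (EsSearchUtil.rerank_results and VectorDBUtil.query_vector_db) speak the same rerank HTTP contract: POST a JSON body with model, query, documents, and top_n, then read back results[].index and results[].relevance_score. The contract in isolation, with placeholder endpoint, key, and model name standing in for the real Config values:

    import json
    import requests

    RERANK_BASE_URL = "https://api.example.com/v1/rerank"  # placeholder, not the repo's value
    RERANK_BINDING_API_KEY = "sk-..."                      # placeholder
    RERANK_MODEL = "example-rerank-model"                  # placeholder

    payload = {
        "model": RERANK_MODEL,
        "query": "混凝土",
        "documents": ["第一段文档……", "第二段文档……"],
        "top_n": 2
    }
    response = requests.post(
        RERANK_BASE_URL,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {RERANK_BINDING_API_KEY}"
        },
        data=json.dumps(payload)
    )
    response.raise_for_status()

    # Each result refers back to the submitted documents list by position,
    # which is why both utilities index into their local result lists.
    for item in response.json().get("results", []):
        doc_idx = item.get("index")
        score = item.get("relevance_score", 0.0)
        if doc_idx is not None:
            print(doc_idx, score)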