From 443aab2882a5ddca2fc86eac3ac0c6616362eef2 Mon Sep 17 00:00:00 2001
From: Magic_yuan <317617749@qq.com>
Date: Fri, 24 Jan 2025 10:15:25 +0800
Subject: [PATCH 1/2] Fix bug where an exception caused the data update to hang
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 lightrag/lightrag.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index edec85f1..f36f73b0 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -469,9 +469,8 @@ class LightRAG:
                 error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
                 logger.error(error_msg)
                 continue
-
-            finally:
-                # Ensure all indexes are updated after each document
+            else:
+                # Only update index when processing succeeds
                 await self._insert_done()
 
     def insert_custom_chunks(self, full_text: str, text_chunks: list[str]):

From f6d29e17930a425f32ac32b179962ab46189f94a Mon Sep 17 00:00:00 2001
From: Magic_yuan <317617749@qq.com>
Date: Fri, 24 Jan 2025 11:01:32 +0800
Subject: [PATCH 2/2] Fix runtime error in cache handling: ].func  # ["func"]
 ^^^^ AttributeError: 'dict' object has no attribute 'func'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 lightrag/utils.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/lightrag/utils.py b/lightrag/utils.py
index ce556ab2..f36e03e2 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -479,9 +479,7 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
     quantized = min_val = max_val = None
     if is_embedding_cache_enabled:
         # Use embedding cache
-        embedding_model_func = hashing_kv.global_config[
-            "embedding_func"
-        ].func  # ["func"]
+        embedding_model_func = hashing_kv.global_config["embedding_func"]["func"]
         llm_model_func = hashing_kv.global_config.get("llm_model_func")
 
         current_embedding = await embedding_model_func([prompt])
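
Note on PATCH 1/2: in a `try`/`except`/`finally` statement, the `finally` block
runs even when the `except` branch exits via `continue`, so the old code still
called `await self._insert_done()` after a document failed, which, per the
commit message, is what hung the data update. With `try`/`except`/`else`, the
index update runs only when the `try` body completes without raising. A minimal
sketch of the control flow, using hypothetical `process`/`finalize` stand-ins
rather than the actual LightRAG methods:

    def process(doc: str) -> None:
        # Hypothetical stand-in for per-document processing; raises on bad input.
        if doc == "bad":
            raise ValueError("cannot process")

    def finalize(doc: str) -> None:
        # Hypothetical stand-in for LightRAG's _insert_done().
        print(f"index updated after {doc!r}")

    for doc in ["good", "bad"]:
        try:
            process(doc)
        except ValueError as exc:
            print(f"failed to process {doc!r}: {exc}")
            continue  # a `finally:` block here would still fire before `continue` takes effect
        else:
            finalize(doc)  # patched behavior: runs only when process() succeeded

Running this prints "index updated after 'good'" and the failure message for
'bad'; with `finally:` in place of `else:`, the finalize step would also fire
for the failed document.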
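
Note on PATCH 2/2: the traceback quoted in the subject ('dict' object has no
attribute 'func') shows that `hashing_kv.global_config["embedding_func"]` is a
plain dict at this point, so attribute access with `.func` fails and the
callable has to be fetched with the `["func"]` key instead. A minimal
reproduction follows; the dict shape and `fake_embed` are illustrative
assumptions, not LightRAG's actual config:

    import asyncio

    async def fake_embed(texts):
        # Hypothetical embedding function returning 3-dim zero vectors.
        return [[0.0] * 3 for _ in texts]

    global_config = {"embedding_func": {"func": fake_embed}}

    try:
        global_config["embedding_func"].func  # old code path
    except AttributeError as exc:
        print(exc)  # 'dict' object has no attribute 'func'

    embedding_model_func = global_config["embedding_func"]["func"]  # patched code path
    print(asyncio.run(embedding_model_func(["hello"])))  # [[0.0, 0.0, 0.0]]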