Merge pull request #634 from magicyuan876/main

Fix a bug where an exception during document processing caused the data update to hang
Committed by zrguo via GitHub on 2025-01-24 16:30:50 +08:00
2 changed files with 3 additions and 6 deletions


@@ -469,9 +469,8 @@ class LightRAG:
error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}" error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
logger.error(error_msg) logger.error(error_msg)
continue continue
else:
finally: # Only update index when processing succeeds
# Ensure all indexes are updated after each document
await self._insert_done() await self._insert_done()
def insert_custom_chunks(self, full_text: str, text_chunks: list[str]): def insert_custom_chunks(self, full_text: str, text_chunks: list[str]):
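
The first change relies on Python's try/except/else semantics: a finally block runs unconditionally, so _insert_done() was awaited even after the except branch's continue, and a failed document could wedge the index update. An else block on a try runs only when the try body raises nothing. Below is a minimal runnable sketch of the pattern, using hypothetical process() and insert_done() stand-ins rather than LightRAG's actual methods:

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def insert_done() -> None:
    # Stand-in for LightRAG._insert_done(); assume it flushes index updates.
    logger.info("index updated")


async def process(doc_id: str) -> None:
    # Hypothetical processing step; raises for one document to simulate failure.
    if doc_id == "bad":
        raise ValueError("extraction failed")


async def process_all(doc_ids: list[str]) -> None:
    for doc_id in doc_ids:
        try:
            await process(doc_id)
        except Exception as e:
            # Log and skip the failed document, as in the patched loop.
            logger.error("Failed to process document %s: %s", doc_id, e)
            continue
        else:
            # Runs only when process() raised nothing; with finally instead,
            # this line would also execute after the except branch's continue.
            await insert_done()


asyncio.run(process_all(["good", "bad", "also_good"]))

Running this logs the failure for "bad" and updates the index only for the two documents that succeed, which is the behavior the patch restores.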


@@ -479,9 +479,7 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
     quantized = min_val = max_val = None
     if is_embedding_cache_enabled:
         # Use embedding cache
-        embedding_model_func = hashing_kv.global_config[
-            "embedding_func"
-        ].func  # ["func"]
+        embedding_model_func = hashing_kv.global_config["embedding_func"]["func"]
         llm_model_func = hashing_kv.global_config.get("llm_model_func")
         current_embedding = await embedding_model_func([prompt])
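
The second change swaps attribute access (.func) for key access (["func"]) on the stored embedding function. A plausible reading, assuming global_config is flattened with dataclasses.asdict as dataclass-based configs commonly are: asdict recursively converts a nested dataclass wrapper into a plain dict, so its fields are reachable only by key and .func raises AttributeError. A self-contained sketch, where EmbeddingFunc is a simplified, hypothetical stand-in for LightRAG's wrapper:

import asyncio
from dataclasses import dataclass, asdict
from typing import Callable


@dataclass
class EmbeddingFunc:
    # Simplified stand-in for LightRAG's embedding wrapper (assumption).
    embedding_dim: int
    func: Callable


async def fake_embed(texts: list[str]) -> list[list[float]]:
    # Dummy embedding function returning fixed-size zero vectors.
    return [[0.0] * 8 for _ in texts]


@dataclass
class GlobalConfig:
    embedding_func: EmbeddingFunc


config = GlobalConfig(EmbeddingFunc(embedding_dim=8, func=fake_embed))

# asdict() recursively turns nested dataclasses into plain dicts, so the
# wrapper's fields become dict keys and attribute access stops working.
global_config = asdict(config)
assert isinstance(global_config["embedding_func"], dict)

embedding_model_func = global_config["embedding_func"]["func"]  # key access: works
# global_config["embedding_func"].func                          # AttributeError

print(asyncio.run(embedding_model_func(["hello"])))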