@@ -469,9 +469,8 @@ class LightRAG:
                     error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
                     logger.error(error_msg)
                     continue
-                finally:
-                    # Ensure all indexes are updated after each document
-
+                else:
+                    # Only update index when processing succeeds
                     await self._insert_done()
 
     def insert_custom_chunks(self, full_text: str, text_chunks: list[str]):
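The hunk above moves `await self._insert_done()` from a `finally` block into an `else` block, so the index update now runs only when the document was processed without an exception; the `continue` in the `except` branch skips it entirely. A minimal sketch of that control-flow difference (hypothetical `process_all` helper, not LightRAG code):

```python
# `finally` runs even when the except branch hits `continue`, while
# `else` runs only when the try body raised no exception.
def process_all(docs: list[str]) -> None:
    for doc in docs:
        try:
            if doc == "bad":
                raise ValueError("parse failure")
        except ValueError as e:
            print(f"skipped {doc}: {e}")
            continue
        else:
            # Reached only on success -- the analogue of `await self._insert_done()`
            print(f"index updated after {doc}")

process_all(["good", "bad"])
# index updated after good
# skipped bad: parse failure
```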
@@ -479,9 +479,7 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
     quantized = min_val = max_val = None
     if is_embedding_cache_enabled:
         # Use embedding cache
-        embedding_model_func = hashing_kv.global_config[
-            "embedding_func"
-        ].func  # ["func"]
+        embedding_model_func = hashing_kv.global_config["embedding_func"]["func"]
         llm_model_func = hashing_kv.global_config.get("llm_model_func")
 
         current_embedding = await embedding_model_func([prompt])
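The hunk above switches the `embedding_func` lookup from attribute access (`.func`) to subscript access (`["func"]`), which assumes the entry in `global_config` is a plain dict rather than an object exposing a `.func` attribute. A self-contained sketch of the cached-embedding lookup under that assumption; `fake_embed` is a hypothetical stand-in for the real embedding model:

```python
# Minimal sketch, assuming `global_config["embedding_func"]` is stored as a
# plain dict (e.g. produced by dataclasses.asdict), so `["func"]` is required;
# `.func` only works while the entry is still an object.
import asyncio

async def fake_embed(texts: list[str]) -> list[list[float]]:
    # Return one trivial 1-dimensional "embedding" per input text
    return [[float(len(t))] for t in texts]

global_config = {"embedding_func": {"func": fake_embed, "embedding_dim": 1}}

async def main() -> None:
    embedding_model_func = global_config["embedding_func"]["func"]
    current_embedding = await embedding_model_func(["hello"])
    print(current_embedding)  # [[5.0]]

asyncio.run(main())
```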