From 8484564f50dceb826b0e7f442bf3c81b15f74360 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 2 Feb 2025 03:54:41 +0800
Subject: [PATCH] Fix llm_model_func retrieval error.

---
 lightrag/utils.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/lightrag/utils.py b/lightrag/utils.py
index f2c2c610..2ae067ba 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -368,7 +368,7 @@ async def get_best_cached_response(
     original_prompt=None,
     cache_type=None,
 ) -> Union[str, None]:
-    logger.debug(f"get_best_cached_response: mode={mode} cache_type={cache_type}")
+    logger.debug(f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}")
     mode_cache = await hashing_kv.get_by_id(mode)
     if not mode_cache:
         return None
@@ -511,11 +511,7 @@ async def handle_cache(
     if is_embedding_cache_enabled:
         # Use embedding cache
         current_embedding = await hashing_kv.embedding_func([prompt])
-        llm_model_func = (
-            hashing_kv.llm_model_func
-            if hasattr(hashing_kv, "llm_model_func")
-            else None
-        )
+        llm_model_func = hashing_kv.global_config.get('llm_model_func')
         quantized, min_val, max_val = quantize_embedding(current_embedding[0])
         best_cached_response = await get_best_cached_response(
             hashing_kv,
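
For context, the change moves the llm_model_func lookup in handle_cache from an attribute probe
on the KV storage instance to the shared global_config dict. A minimal sketch of the difference,
assuming the storage object carries a global_config dict as the patched line implies; the class
and helper names below are hypothetical, not LightRAG's actual definitions:

    # Illustrative sketch: why hasattr() on the storage instance returns None
    # while the global_config lookup finds the callable.
    from dataclasses import dataclass, field
    from typing import Callable, Optional


    @dataclass
    class FakeKVStorage:
        # The storage object only carries the shared config dict; the LLM
        # callable is not set as a direct attribute on the instance.
        global_config: dict = field(default_factory=dict)


    async def fake_llm(prompt: str) -> str:
        return f"echo: {prompt}"


    kv = FakeKVStorage(global_config={"llm_model_func": fake_llm})

    # Old pattern: the attribute does not exist on the instance, so the
    # embedding-cache similarity check silently loses its LLM.
    old_lookup = kv.llm_model_func if hasattr(kv, "llm_model_func") else None
    assert old_lookup is None

    # Patched pattern: read the callable from the shared global_config dict.
    new_lookup: Optional[Callable] = kv.global_config.get("llm_model_func")
    assert new_lookup is fake_llm

With the old pattern the LLM-based cache check could never run; the patched lookup recovers the
configured model function from the same config that the rest of the pipeline uses.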