diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 5ae0a47f..d77af71d 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -916,7 +916,7 @@ class LightRAG:
                 else self.key_string_value_json_storage_cls(
                     namespace="llm_response_cache",
                     global_config=asdict(self),
-                    embedding_func=self.embedding_func,
+                    embedding_func=self.embedding_func,
                 ),
                 prompt=prompt,
             )
diff --git a/lightrag/utils.py b/lightrag/utils.py
index 2ae067ba..3a69513b 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -368,7 +368,9 @@ async def get_best_cached_response(
     original_prompt=None,
     cache_type=None,
 ) -> Union[str, None]:
-    logger.debug(f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}")
+    logger.debug(
+        f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}"
+    )
     mode_cache = await hashing_kv.get_by_id(mode)
     if not mode_cache:
         return None
@@ -511,7 +513,7 @@ async def handle_cache(
     if is_embedding_cache_enabled:
         # Use embedding cache
         current_embedding = await hashing_kv.embedding_func([prompt])
-        llm_model_func = hashing_kv.global_config.get('llm_model_func')
+        llm_model_func = hashing_kv.global_config.get("llm_model_func")
         quantized, min_val, max_val = quantize_embedding(current_embedding[0])
         best_cached_response = await get_best_cached_response(
             hashing_kv,