From 5d14ab03eb32e7c4685e0bbeb0170b61cb30e786 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 2 Feb 2025 01:56:32 +0800
Subject: [PATCH] Fix linting

---
 lightrag/operate.py | 13 ++++++++++---
 lightrag/utils.py   | 15 ++++++++++++---
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/lightrag/operate.py b/lightrag/operate.py
index 0e1eb3f3..c8c50f61 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -352,7 +352,6 @@ async def extract_entities(
         input_text: str, history_messages: list[dict[str, str]] = None
     ) -> str:
         if enable_llm_cache_for_entity_extract and llm_response_cache:
-
             if history_messages:
                 history = json.dumps(history_messages, ensure_ascii=False)
                 _prompt = history + "\n" + input_text
@@ -381,7 +380,12 @@ async def extract_entities(
                 res: str = await use_llm_func(input_text)
             await save_to_cache(
                 llm_response_cache,
-                CacheData(args_hash=arg_hash, content=res, prompt=_prompt, cache_type="extract"),
+                CacheData(
+                    args_hash=arg_hash,
+                    content=res,
+                    prompt=_prompt,
+                    cache_type="extract",
+                ),
             )
             return res
 
@@ -747,7 +751,10 @@ async def extract_keywords_only(
 
     # 7. Cache only the processed keywords with cache type
     if hl_keywords or ll_keywords:
-        cache_data = {"high_level_keywords": hl_keywords, "low_level_keywords": ll_keywords}
+        cache_data = {
+            "high_level_keywords": hl_keywords,
+            "low_level_keywords": ll_keywords,
+        }
         await save_to_cache(
             hashing_kv,
             CacheData(
diff --git a/lightrag/utils.py b/lightrag/utils.py
index 1bd06e6d..dfd68c72 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -484,10 +484,17 @@ def dequantize_embedding(
 
 
 async def handle_cache(
-    hashing_kv, args_hash, prompt, mode="default", cache_type=None, force_llm_cache=False
+    hashing_kv,
+    args_hash,
+    prompt,
+    mode="default",
+    cache_type=None,
+    force_llm_cache=False,
 ):
     """Generic cache handling function"""
-    if hashing_kv is None or not (force_llm_cache or hashing_kv.global_config.get("enable_llm_cache")):
+    if hashing_kv is None or not (
+        force_llm_cache or hashing_kv.global_config.get("enable_llm_cache")
+    ):
         return None, None, None, None
 
     if mode != "default":
@@ -504,7 +511,9 @@ async def handle_cache(
             # Use embedding cache
             current_embedding = await hashing_kv.embedding_func([prompt])
             llm_model_func = (
-                hashing_kv.llm_model_func if hasattr(hashing_kv, "llm_model_func") else None
+                hashing_kv.llm_model_func
+                if hasattr(hashing_kv, "llm_model_func")
+                else None
             )
             quantized, min_val, max_val = quantize_embedding(current_embedding[0])
             best_cached_response = await get_best_cached_response(
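
Note on the cached extraction flow touched by the hunks above: the patch only re-wraps lines to satisfy the linter, so the check-cache / call-LLM / save-result behavior of extract_entities' inner helper is unchanged. The following is a minimal, self-contained sketch of that pattern, assuming stand-in names: DictKV, fake_llm, cached_extract, and the md5-based hash are invented for illustration and are not lightrag's API; only the prompt construction (history + "\n" + input_text) and the cache_type="extract" field mirror the diff.

# Minimal sketch of the check-cache / call-LLM / save-result pattern.
# DictKV, fake_llm and the md5-based hash are illustrative stand-ins,
# not lightrag's actual API; only the prompt construction and the
# "extract" cache type mirror the patch above.
import asyncio
import hashlib
import json


class DictKV:
    """Toy async key-value store standing in for llm_response_cache."""

    def __init__(self):
        self._data = {}

    async def get_by_id(self, key):
        return self._data.get(key)

    async def upsert(self, data: dict):
        self._data.update(data)


async def fake_llm(prompt: str) -> str:
    """Stand-in for use_llm_func."""
    return f"entities extracted from: {prompt[:24]}..."


async def cached_extract(kv: DictKV, input_text: str, history_messages=None) -> str:
    # Build the prompt the same way the patched helper does: prepend the
    # JSON-serialized history when it is present.
    if history_messages:
        _prompt = json.dumps(history_messages, ensure_ascii=False) + "\n" + input_text
    else:
        _prompt = input_text

    # Hypothetical hash; lightrag uses its own args-hash helper.
    arg_hash = hashlib.md5(_prompt.encode("utf-8")).hexdigest()

    cached = await kv.get_by_id(arg_hash)
    if cached is not None:
        return cached["return"]

    res = await fake_llm(input_text)
    # Mirrors save_to_cache(..., CacheData(args_hash=..., content=res,
    # prompt=_prompt, cache_type="extract")) from the patch.
    await kv.upsert({arg_hash: {"return": res, "cache_type": "extract"}})
    return res


if __name__ == "__main__":
    kv = DictKV()
    print(asyncio.run(cached_extract(kv, "Alice met Bob in Paris.")))
    # Second call with the same input hits DictKV instead of fake_llm.
    print(asyncio.run(cached_extract(kv, "Alice met Bob in Paris.")))

Running the script prints the same string twice; the second call returns straight from DictKV instead of invoking fake_llm, which is the caching behavior the reformatted code path preserves.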