Fix linting
@@ -916,7 +916,7 @@ class LightRAG:
             else self.key_string_value_json_storage_cls(
                 namespace="llm_response_cache",
                 global_config=asdict(self),
-                embedding_func=self.embedding_func,
+                embedding_func=self.embedding_func,
             ),
             prompt=prompt,
         )

@@ -368,7 +368,9 @@ async def get_best_cached_response(
     original_prompt=None,
     cache_type=None,
 ) -> Union[str, None]:
-    logger.debug(f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}")
+    logger.debug(
+        f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}"
+    )
     mode_cache = await hashing_kv.get_by_id(mode)
     if not mode_cache:
         return None

@@ -511,7 +513,7 @@ async def handle_cache(
     if is_embedding_cache_enabled:
         # Use embedding cache
         current_embedding = await hashing_kv.embedding_func([prompt])
-        llm_model_func = hashing_kv.global_config.get('llm_model_func')
+        llm_model_func = hashing_kv.global_config.get("llm_model_func")
         quantized, min_val, max_val = quantize_embedding(current_embedding[0])
         best_cached_response = await get_best_cached_response(
             hashing_kv,
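
For context on the flow the last hunk touches: handle_cache embeds the incoming prompt, quantizes that embedding, and passes it to get_best_cached_response to find the closest previously cached answer. Below is a minimal standalone sketch of that quantize-and-compare idea, not LightRAG's actual implementation; the cache field names (embedding, embedding_min, embedding_max, return) and the similarity_threshold default are assumptions made for illustration.

# Illustrative sketch of an embedding cache lookup (assumed cache layout, not LightRAG's code).
import numpy as np


def quantize_embedding(embedding: np.ndarray, bits: int = 8):
    """Scale a float embedding into uint8 buckets; return (quantized, min_val, max_val)."""
    min_val, max_val = float(embedding.min()), float(embedding.max())
    scale = (max_val - min_val) / (2**bits - 1) or 1.0  # avoid division by zero for flat vectors
    quantized = np.round((embedding - min_val) / scale).astype(np.uint8)
    return quantized, min_val, max_val


def dequantize_embedding(quantized: np.ndarray, min_val: float, max_val: float, bits: int = 8) -> np.ndarray:
    """Invert quantize_embedding back to an approximate float32 embedding."""
    scale = (max_val - min_val) / (2**bits - 1) or 1.0
    return quantized.astype(np.float32) * scale + min_val


def best_cached_response(current: np.ndarray, mode_cache: dict, similarity_threshold: float = 0.95):
    """Return the cached answer whose stored embedding is most similar to `current`, or None."""
    best_answer, best_sim = None, -1.0
    for entry in mode_cache.values():  # hypothetical layout: one dict per previously cached prompt
        cached = dequantize_embedding(
            np.asarray(entry["embedding"], dtype=np.uint8),
            entry["embedding_min"],
            entry["embedding_max"],
        )
        # Cosine similarity between the current prompt embedding and the cached one.
        sim = float(np.dot(current, cached) / (np.linalg.norm(current) * np.linalg.norm(cached)))
        if sim > best_sim:
            best_answer, best_sim = entry["return"], sim
    return best_answer if best_sim >= similarity_threshold else None


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    prompt_embedding = rng.normal(size=128).astype(np.float32)
    q, lo, hi = quantize_embedding(prompt_embedding)
    cache = {"hash-1": {"embedding": q.tolist(), "embedding_min": lo, "embedding_max": hi, "return": "cached answer"}}
    print(best_cached_response(prompt_embedding, cache))  # prints "cached answer" (similarity ~1.0)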