Fix linting

yangdx
2025-03-10 02:07:19 +08:00
parent 14e1b31d1c
commit 4065a7df92
6 changed files with 44 additions and 25 deletions

View File

@@ -362,9 +362,7 @@ def parse_args(is_uvicorn_mode: bool = False) -> argparse.Namespace:
     # Inject LLM cache configuration
     args.enable_llm_cache_for_extract = get_env_value(
-        "ENABLE_LLM_CACHE_FOR_EXTRACT",
-        False,
-        bool
+        "ENABLE_LLM_CACHE_FOR_EXTRACT", False, bool
     )
     ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name
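Note: get_env_value belongs to the LightRAG API helpers and its implementation is not part of this diff. A minimal sketch of what such an env helper typically does, assuming it reads os.environ and coerces to the requested type (the accepted boolean spellings below are an assumption):

# Hypothetical sketch of an env helper like get_env_value;
# the real implementation in the LightRAG API utilities may differ.
import os

def get_env_value(name: str, default, value_type=str):
    """Read an environment variable and coerce it to value_type."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    if value_type is bool:
        # Assumed truthy spellings; anything else counts as False.
        return raw.strip().lower() in ("1", "true", "yes", "on")
    return value_type(raw)

# Usage mirroring the reformatted call above:
enable = get_env_value("ENABLE_LLM_CACHE_FOR_EXTRACT", False, bool)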

View File

@@ -96,11 +96,15 @@ class JsonDocStatusStorage(DocStatusStorage):
     async def index_done_callback(self) -> None:
         async with self._storage_lock:
-            if (is_multiprocess and self.storage_updated.value) or (not is_multiprocess and self.storage_updated):
+            if (is_multiprocess and self.storage_updated.value) or (
+                not is_multiprocess and self.storage_updated
+            ):
                 data_dict = (
                     dict(self._data) if hasattr(self._data, "_getvalue") else self._data
                 )
-                logger.info(f"Process {os.getpid()} doc status writting {len(data_dict)} records to {self.namespace}")
+                logger.info(
+                    f"Process {os.getpid()} doc status writting {len(data_dict)} records to {self.namespace}"
+                )
                 write_json(data_dict, self._file_name)
                 await clear_all_update_flags(self.namespace)
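The guard reformatted above covers two storage modes: with multiple workers, storage_updated is a shared multiprocessing.Value read through .value; in a single process it is a plain bool. A minimal standalone sketch of that pattern (the setup here is an assumption for illustration, not the shared_storage implementation):

import multiprocessing

is_multiprocess = False  # assumed to be set once at startup in the real code

if is_multiprocess:
    # Shared across worker processes; read and written through .value
    storage_updated = multiprocessing.Value("b", False)
else:
    # Single process: a plain bool is enough
    storage_updated = False

def needs_flush() -> bool:
    # Same shape as the guard in index_done_callback above
    return (is_multiprocess and storage_updated.value) or (
        not is_multiprocess and storage_updated
    )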

View File

@@ -48,17 +48,24 @@ class JsonKVStorage(BaseKVStorage):
         # Calculate data count based on namespace
         if self.namespace.endswith("cache"):
             # For cache namespaces, sum the cache entries across all cache types
-            data_count = sum(len(first_level_dict) for first_level_dict in loaded_data.values()
-                             if isinstance(first_level_dict, dict))
+            data_count = sum(
+                len(first_level_dict)
+                for first_level_dict in loaded_data.values()
+                if isinstance(first_level_dict, dict)
+            )
         else:
             # For non-cache namespaces, use the original count method
             data_count = len(loaded_data)
-        logger.info(f"Process {os.getpid()} KV load {self.namespace} with {data_count} records")
+        logger.info(
+            f"Process {os.getpid()} KV load {self.namespace} with {data_count} records"
+        )

     async def index_done_callback(self) -> None:
         async with self._storage_lock:
-            if (is_multiprocess and self.storage_updated.value) or (not is_multiprocess and self.storage_updated):
+            if (is_multiprocess and self.storage_updated.value) or (
+                not is_multiprocess and self.storage_updated
+            ):
                 data_dict = (
                     dict(self._data) if hasattr(self._data, "_getvalue") else self._data
                 )
@@ -66,17 +73,21 @@ class JsonKVStorage(BaseKVStorage):
             # Calculate data count based on namespace
             if self.namespace.endswith("cache"):
                 # # For cache namespaces, sum the cache entries across all cache types
-                data_count = sum(len(first_level_dict) for first_level_dict in data_dict.values()
-                                 if isinstance(first_level_dict, dict))
+                data_count = sum(
+                    len(first_level_dict)
+                    for first_level_dict in data_dict.values()
+                    if isinstance(first_level_dict, dict)
+                )
             else:
                 # For non-cache namespaces, use the original count method
                 data_count = len(data_dict)
-            logger.info(f"Process {os.getpid()} KV writting {data_count} records to {self.namespace}")
+            logger.info(
+                f"Process {os.getpid()} KV writting {data_count} records to {self.namespace}"
+            )
             write_json(data_dict, self._file_name)
             await clear_all_update_flags(self.namespace)

     async def get_all(self) -> dict[str, Any]:
         """Get all data from storage

View File

@@ -344,6 +344,7 @@ async def set_all_update_flags(namespace: str):
         else:
             _update_flags[namespace][i] = True

+
 async def clear_all_update_flags(namespace: str):
     """Clear all update flag of namespace indicating all workers need to reload data from files"""
     global _update_flags
@@ -360,6 +361,7 @@ async def clear_all_update_flags(namespace: str):
         else:
             _update_flags[namespace][i] = False

+
 async def get_all_update_flags_status() -> Dict[str, list]:
     """
     Get update flags status for all namespaces.

View File

@@ -354,7 +354,9 @@ class LightRAG:
namespace=make_namespace( namespace=make_namespace(
self.namespace_prefix, NameSpace.KV_STORE_LLM_RESPONSE_CACHE self.namespace_prefix, NameSpace.KV_STORE_LLM_RESPONSE_CACHE
), ),
global_config=asdict(self), # Add global_config to ensure cache works properly global_config=asdict(
self
), # Add global_config to ensure cache works properly
embedding_func=self.embedding_func, embedding_func=self.embedding_func,
) )
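asdict(self) works here because LightRAG is a dataclass, so dataclasses.asdict converts its fields into the plain dict the storage backends receive as global_config. A toy stand-in (FakeRAG and its fields are hypothetical):

from dataclasses import dataclass, asdict

@dataclass
class FakeRAG:
    namespace_prefix: str = ""
    enable_llm_cache: bool = True

global_config = asdict(FakeRAG())
print(global_config)  # {'namespace_prefix': '', 'enable_llm_cache': True}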

View File

@@ -733,7 +733,9 @@ async def save_to_cache(hashing_kv, cache_data: CacheData):
if cache_data.args_hash in mode_cache: if cache_data.args_hash in mode_cache:
existing_content = mode_cache[cache_data.args_hash].get("return") existing_content = mode_cache[cache_data.args_hash].get("return")
if existing_content == cache_data.content: if existing_content == cache_data.content:
logger.info(f"Cache content unchanged for {cache_data.args_hash}, skipping update") logger.info(
f"Cache content unchanged for {cache_data.args_hash}, skipping update"
)
return return
# Update cache with new content # Update cache with new content
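The early return above skips rewriting a cache entry whose stored "return" payload already matches the new content. A simplified synchronous sketch of that guard (save_if_changed is a hypothetical stand-in for this part of save_to_cache):

def save_if_changed(mode_cache: dict, args_hash: str, content: str) -> bool:
    """Return True if the entry was written, False if skipped as unchanged."""
    entry = mode_cache.get(args_hash)
    if entry is not None and entry.get("return") == content:
        return False  # unchanged content: skip the write
    mode_cache[args_hash] = {"return": content}
    return True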